Merge tag 'pinctrl-v5.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl
author	Linus Torvalds <torvalds@linux-foundation.org>	Thu, 21 Jan 2021 19:14:24 +0000 (11:14 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>	Thu, 21 Jan 2021 19:14:24 +0000 (11:14 -0800)
Pull pin control fixes from Linus Walleij:
 "These are all driver fixes, the Qualcomm stuff is the most widely used
  and important:

   - The main matter is a complicated fixup for the Qualcomm deep sleep
     states.

     This manifests in how interrupts get handled or in some cases not
     handled in cooperation with the PDC (Power Domain Controller). It's
     one of these really hardcore bug fixes that signifies high maturity
     of the platform.

   - Fix a register layout problem in the JZ4760 driver

   - Fix a register offset in the Aspeed G6 driver

   - Fix a compiler warning in the Nomadik driver

   - Fix a fallback code path in the mediatek driver"

* tag 'pinctrl-v5.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl:
  pinctrl: qcom: Don't clear pending interrupts when enabling
  pinctrl: qcom: Properly clear "intr_ack_high" interrupts when unmasking
  pinctrl: qcom: No need to read-modify-write the interrupt status
  pinctrl: qcom: Allow SoCs to specify a GPIO function that's not 0
  pinctrl: mediatek: Fix fallback call path
  pinctrl: nomadik: Remove unused variable in nmk_gpio_dbg_show_one
  pinctrl: aspeed: g6: Fix PWMG0 pinctrl setting
  pinctrl: ingenic: Rename registers from JZ4760_GPIO_* to JZ4770_GPIO_*
  pinctrl: ingenic: Fix JZ4760 support

926 files changed:
.mailmap
CREDITS
Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
Documentation/RCU/Design/Requirements/Requirements.rst
Documentation/admin-guide/binfmt-misc.rst
Documentation/admin-guide/bootconfig.rst
Documentation/admin-guide/kernel-parameters.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/mm/concepts.rst
Documentation/core-api/index.rst
Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml
Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
Documentation/devicetree/bindings/net/renesas,etheravb.yaml
Documentation/devicetree/bindings/net/snps,dwmac.yaml
Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml
Documentation/devicetree/bindings/sound/ti,j721e-cpb-ivi-audio.yaml
Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
Documentation/doc-guide/sphinx.rst
Documentation/firmware-guide/acpi/apei/einj.rst
Documentation/hwmon/sbtsi_temp.rst
Documentation/kbuild/makefiles.rst
Documentation/kernel-hacking/locking.rst
Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
Documentation/networking/netdev-FAQ.rst
Documentation/networking/netdevices.rst
Documentation/networking/packet_mmap.rst
Documentation/networking/tls-offload.rst
Documentation/process/4.Coding.rst
Documentation/sound/alsa-configuration.rst
Documentation/sound/kernel-api/writing-an-alsa-driver.rst
Documentation/virt/kvm/api.rst
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/include/asm/local64.h [deleted file]
arch/arc/Makefile
arch/arc/boot/Makefile
arch/arc/include/asm/Kbuild
arch/arc/include/asm/page.h
arch/arc/kernel/entry.S
arch/arc/plat-hsdk/Kconfig
arch/arm/boot/dts/omap3-n950-n9.dtsi
arch/arm/boot/dts/picoxcell-pc3x2.dtsi
arch/arm/boot/dts/ste-ux500-samsung-golden.dts
arch/arm/configs/omap2plus_defconfig
arch/arm/crypto/chacha-glue.c
arch/arm/include/asm/Kbuild
arch/arm/mach-omap2/omap_device.c
arch/arm/mach-omap2/pmic-cpcap.c
arch/arm/xen/enlighten.c
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/dts/bitmain/bm1880.dtsi
arch/arm64/include/asm/Kbuild
arch/arm64/include/asm/atomic.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry-ftrace.S
arch/arm64/kernel/entry.S
arch/arm64/kernel/perf_event.c
arch/arm64/kernel/probes/kprobes_trampoline.S
arch/arm64/kernel/signal.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/syscall.c
arch/arm64/kernel/traps.c
arch/arm64/kernel/vdso/Makefile
arch/arm64/kernel/vdso/vdso.lds.S
arch/arm64/kvm/Kconfig
arch/arm64/kvm/Makefile
arch/arm64/kvm/arch_timer.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/hyp-smp.c
arch/arm64/kvm/hyp/nvhe/psci-relay.c
arch/arm64/kvm/pmu-emul.c
arch/arm64/kvm/sys_regs.c
arch/arm64/kvm/va_layout.c
arch/arm64/kvm/vgic/vgic-init.c
arch/arm64/kvm/vgic/vgic-v2.c
arch/arm64/kvm/vgic/vgic-v3.c
arch/arm64/mm/init.c
arch/arm64/mm/proc.S
arch/csky/include/asm/Kbuild
arch/h8300/include/asm/Kbuild
arch/hexagon/include/asm/Kbuild
arch/ia64/include/asm/local64.h [deleted file]
arch/ia64/include/asm/sparsemem.h
arch/ia64/mm/init.c
arch/m68k/include/asm/Kbuild
arch/microblaze/include/asm/Kbuild
arch/mips/boot/compressed/decompress.c
arch/mips/cavium-octeon/octeon-irq.c
arch/mips/include/asm/Kbuild
arch/mips/kernel/binfmt_elfn32.c
arch/mips/kernel/binfmt_elfo32.c
arch/mips/kernel/relocate.c
arch/nds32/include/asm/Kbuild
arch/openrisc/include/asm/Kbuild
arch/parisc/include/asm/Kbuild
arch/powerpc/include/asm/Kbuild
arch/powerpc/include/asm/vdso/gettimeofday.h
arch/powerpc/kernel/head_book3s_32.S
arch/powerpc/kernel/vmlinux.lds.S
arch/riscv/Kconfig
arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
arch/riscv/configs/defconfig
arch/riscv/include/asm/Kbuild
arch/riscv/include/asm/pgtable.h
arch/riscv/include/asm/vdso.h
arch/riscv/kernel/cacheinfo.c
arch/riscv/kernel/entry.S
arch/riscv/kernel/setup.c
arch/riscv/kernel/stacktrace.c
arch/riscv/kernel/time.c
arch/riscv/kernel/vdso.c
arch/riscv/mm/init.c
arch/riscv/mm/kasan_init.c
arch/s390/Kconfig
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/asm/Kbuild
arch/sh/include/asm/Kbuild
arch/sparc/include/asm/Kbuild
arch/x86/Kconfig
arch/x86/hyperv/hv_init.c
arch/x86/hyperv/mmu.c
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/local64.h [deleted file]
arch/x86/include/asm/mshyperv.h
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/sev-es-shared.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/mmu/tdp_mmu.h
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/pgtable.c
arch/x86/xen/enlighten_hvm.c
arch/x86/xen/smp_hvm.c
arch/xtensa/include/asm/Kbuild
block/bfq-iosched.c
block/blk-core.c
block/blk-iocost.c
block/blk-mq-debugfs.c
block/blk-mq.c
block/blk-pm.c
block/blk-pm.h
block/genhd.c
crypto/asymmetric_keys/asym_tpm.c
crypto/asymmetric_keys/public_key.c
crypto/ecdh.c
crypto/xor.c
drivers/acpi/Kconfig
drivers/acpi/internal.h
drivers/acpi/scan.c
drivers/acpi/x86/s2idle.c
drivers/atm/idt77252.c
drivers/base/core.c
drivers/base/regmap/regmap-debugfs.c
drivers/block/Kconfig
drivers/block/rnbd/Kconfig
drivers/block/rnbd/README
drivers/block/rnbd/rnbd-clt.c
drivers/block/rnbd/rnbd-srv.c
drivers/clk/tegra/clk-tegra30.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/powernow-k8.c
drivers/crypto/Kconfig
drivers/dma-buf/dma-buf.c
drivers/dma-buf/heaps/cma_heap.c
drivers/dma/dw-edma/dw-edma-core.c
drivers/dma/idxd/sysfs.c
drivers/dma/mediatek/mtk-hsdma.c
drivers/dma/milbeaut-xdmac.c
drivers/dma/qcom/bam_dma.c
drivers/dma/qcom/gpi.c
drivers/dma/stm32-mdma.c
drivers/dma/ti/k3-udma.c
drivers/dma/xilinx/xilinx_dma.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
drivers/gpu/drm/amd/display/Kconfig
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
drivers/gpu/drm/amd/display/dc/calcs/Makefile
drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/dcn10/Makefile
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/Makefile
drivers/gpu/drm/amd/display/dc/dcn21/Makefile
drivers/gpu/drm/amd/display/dc/dcn30/Makefile
drivers/gpu/drm/amd/display/dc/dcn301/Makefile
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
drivers/gpu/drm/amd/display/dc/dcn302/Makefile
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
drivers/gpu/drm/amd/display/dc/dsc/Makefile
drivers/gpu/drm/amd/display/dc/os_types.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
drivers/gpu/drm/drm_plane.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/display/icl_dsi.c
drivers/gpu/drm/i915/display/intel_display_types.h
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_panel.c
drivers/gpu/drm/i915/display/vlv_dsi.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gt/gen7_renderclear.c
drivers/gpu/drm/i915/gt/intel_ring_submission.c
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_mitigations.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_mitigations.h [new file with mode: 0644]
drivers/gpu/drm/msm/adreno/a2xx_gpu.c
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.h
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/msm/dp/dp_panel.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/nouveau/dispnv50/Kbuild
drivers/gpu/drm/nouveau/dispnv50/core.c
drivers/gpu/drm/nouveau/dispnv50/curs.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/dispnv50/disp.h
drivers/gpu/drm/nouveau/dispnv50/wimm.c
drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
drivers/gpu/drm/nouveau/dispnv50/wndw.c
drivers/gpu/drm/nouveau/dispnv50/wndw.h
drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvif/cl0080.h
drivers/gpu/drm/nouveau/include/nvif/class.h
drivers/gpu/drm/nouveau/include/nvkm/core/device.h
drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
drivers/gpu/drm/nouveau/nouveau_backlight.c
drivers/gpu/drm/nouveau/nvif/disp.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/ttm/ttm_pool.c
drivers/hid/Kconfig
drivers/hid/amd-sfh-hid/amd_sfh_client.c
drivers/hid/amd-sfh-hid/amd_sfh_hid.h
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-uclogic-params.c
drivers/hid/hid-wiimote-core.c
drivers/hid/wacom_sys.c
drivers/hv/vmbus_drv.c
drivers/hwmon/amd_energy.c
drivers/hwmon/pwm-fan.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/busses/i2c-sprd.c
drivers/ide/ide-atapi.c
drivers/ide/ide-io.c
drivers/ide/ide-pm.c
drivers/idle/intel_idle.c
drivers/infiniband/core/cma_configfs.c
drivers/infiniband/core/restrack.c
drivers/infiniband/core/ucma.c
drivers/infiniband/core/umem.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
drivers/interconnect/imx/imx.c
drivers/interconnect/imx/imx8mq.c
drivers/interconnect/qcom/Kconfig
drivers/iommu/amd/init.c
drivers/iommu/amd/iommu.c
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
drivers/iommu/dma-iommu.c
drivers/iommu/intel/dmar.c
drivers/iommu/intel/iommu.c
drivers/iommu/intel/irq_remapping.c
drivers/iommu/intel/svm.c
drivers/iommu/iova.c
drivers/isdn/mISDN/Kconfig
drivers/lightnvm/Kconfig
drivers/md/Kconfig
drivers/md/bcache/features.c
drivers/md/bcache/features.h
drivers/md/bcache/super.c
drivers/md/dm-bufio.c
drivers/md/dm-crypt.c
drivers/md/dm-integrity.c
drivers/md/dm-raid.c
drivers/md/dm-snap.c
drivers/md/dm.c
drivers/misc/habanalabs/common/command_submission.c
drivers/misc/habanalabs/common/device.c
drivers/misc/habanalabs/common/firmware_if.c
drivers/misc/habanalabs/common/habanalabs.h
drivers/misc/habanalabs/common/habanalabs_drv.c
drivers/misc/habanalabs/common/habanalabs_ioctl.c
drivers/misc/habanalabs/common/hw_queue.c
drivers/misc/habanalabs/common/pci.c
drivers/misc/habanalabs/gaudi/gaudi.c
drivers/misc/habanalabs/gaudi/gaudiP.h
drivers/misc/habanalabs/gaudi/gaudi_coresight.c
drivers/misc/habanalabs/goya/goya.c
drivers/misc/habanalabs/include/common/hl_boot_if.h
drivers/misc/pvpanic.c
drivers/net/bareudp.c
drivers/net/can/Kconfig
drivers/net/can/dev.c
drivers/net/can/m_can/m_can.c
drivers/net/can/m_can/tcan4x5x.c
drivers/net/can/rcar/Kconfig
drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
drivers/net/can/vxcan.c
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/hirschmann/Kconfig
drivers/net/dsa/lantiq_gswip.c
drivers/net/dsa/mv88e6xxx/global1_vtu.c
drivers/net/ethernet/aquantia/Kconfig
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
drivers/net/ethernet/freescale/fs_enet/mii-fec.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/freescale/ucc_geth.h
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
drivers/net/ethernet/marvell/octeontx2/af/cgx.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
drivers/net/ethernet/mellanox/mlx5/core/lag.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/rdma.c
drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_net.c
drivers/net/ethernet/natsemi/macsonic.c
drivers/net/ethernet/natsemi/xtsonic.c
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
drivers/net/ethernet/qlogic/Kconfig
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qede/qede_fp.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/stmicro/stmmac/dwmac5.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/ethernet/ti/cpts.c
drivers/net/ipa/gsi.c
drivers/net/ipa/ipa_clock.c
drivers/net/ipa/ipa_modem.c
drivers/net/mdio/mdio-bitbang.c
drivers/net/phy/smsc.c
drivers/net/ppp/ppp_generic.c
drivers/net/tun.c
drivers/net/usb/Kconfig
drivers/net/usb/cdc_ether.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/r8153_ecm.c
drivers/net/usb/rndis_host.c
drivers/net/virtio_net.c
drivers/net/wan/Kconfig
drivers/net/wan/hdlc_ppp.c
drivers/net/wireless/ath/ath11k/core.c
drivers/net/wireless/ath/ath11k/dp_rx.c
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/ath/ath11k/pci.c
drivers/net/wireless/ath/ath11k/pci.h
drivers/net/wireless/ath/ath11k/peer.c
drivers/net/wireless/ath/ath11k/peer.h
drivers/net/wireless/ath/ath11k/qmi.c
drivers/net/wireless/ath/ath11k/qmi.h
drivers/net/wireless/ath/ath11k/wmi.c
drivers/net/wireless/ath/wil6210/Kconfig
drivers/net/wireless/mediatek/mt76/mt7915/init.c
drivers/net/wireless/mediatek/mt76/sdio.c
drivers/net/wireless/mediatek/mt76/usb.c
drivers/net/wireless/realtek/rtlwifi/core.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/tcp.c
drivers/nvme/target/fcloop.c
drivers/nvme/target/rdma.c
drivers/opp/core.c
drivers/perf/arm_pmu.c
drivers/ptp/Kconfig
drivers/regulator/Kconfig
drivers/regulator/bd718x7-regulator.c
drivers/regulator/pf8x00-regulator.c
drivers/regulator/qcom-rpmh-regulator.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/cxgbi/cxgb4i/Kconfig
drivers/scsi/hisi_sas/hisi_sas.h
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/mpt3sas/Kconfig
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/qedi/qedi_main.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_transport_spi.c
drivers/scsi/sd.c
drivers/scsi/ufs/ufs-mediatek-trace.h
drivers/scsi/ufs/ufs-mediatek.c
drivers/scsi/ufs/ufs-mediatek.h
drivers/scsi/ufs/ufs.h
drivers/scsi/ufs/ufshcd-pci.c
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/spi/spi-altera.c
drivers/spi/spi-cadence.c
drivers/spi/spi-fsl-spi.c
drivers/spi/spi-geni-qcom.c
drivers/spi/spi-stm32.c
drivers/spi/spi.c
drivers/staging/comedi/comedi_fops.c
drivers/staging/hikey9xx/hisi-spmi-controller.c
drivers/staging/media/atomisp/pci/atomisp_subdev.c
drivers/staging/mt7621-dma/mtk-hsdma.c
drivers/target/target_core_xcopy.c
drivers/target/target_core_xcopy.h
drivers/tty/Kconfig
drivers/tty/Makefile
drivers/tty/serial/sifive.c
drivers/tty/ttynull.c
drivers/usb/chipidea/ci_hdrc_imx.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-wdm.c
drivers/usb/class/usblp.c
drivers/usb/core/hcd.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/dwc3-meson-g12a.c
drivers/usb/dwc3/gadget.c
drivers/usb/dwc3/ulpi.c
drivers/usb/gadget/Kconfig
drivers/usb/gadget/composite.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/function/f_printer.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/gadget/function/u_ether.c
drivers/usb/gadget/legacy/acm_ms.c
drivers/usb/gadget/udc/Kconfig
drivers/usb/gadget/udc/Makefile
drivers/usb/gadget/udc/core.c
drivers/usb/gadget/udc/dummy_hcd.c
drivers/usb/gadget/udc/fsl_mxc_udc.c [deleted file]
drivers/usb/host/xhci.c
drivers/usb/misc/yurex.c
drivers/usb/serial/iuu_phoenix.c
drivers/usb/serial/option.c
drivers/usb/storage/unusual_uas.h
drivers/usb/typec/altmodes/Kconfig
drivers/usb/typec/class.c
drivers/usb/typec/mux/intel_pmc_mux.c
drivers/usb/usbip/vhci_hcd.c
drivers/vhost/net.c
drivers/vhost/vsock.c
drivers/xen/events/events_base.c
drivers/xen/platform-pci.c
drivers/xen/privcmd.c
drivers/xen/xenbus/xenbus.h
drivers/xen/xenbus/xenbus_comms.c
drivers/xen/xenbus/xenbus_probe.c
fs/afs/dir.c
fs/afs/dir_edit.c
fs/afs/xdr_fs.h
fs/block_dev.c
fs/btrfs/backref.c
fs/btrfs/block-group.c
fs/btrfs/btrfs_inode.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/dev-replace.c
fs/btrfs/discard.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file-item.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/print-tree.c
fs/btrfs/print-tree.h
fs/btrfs/qgroup.c
fs/btrfs/reflink.c
fs/btrfs/relocation.c
fs/btrfs/send.c
fs/btrfs/space-info.c
fs/btrfs/super.c
fs/btrfs/tests/btrfs-tests.c
fs/btrfs/tests/inode-tests.c
fs/btrfs/transaction.c
fs/btrfs/tree-checker.c
fs/btrfs/volumes.c
fs/cachefiles/rdwr.c
fs/ceph/mds_client.c
fs/cifs/connect.c
fs/cifs/dfs_cache.c
fs/cifs/fs_context.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/ext4/ext4_jbd2.c
fs/ext4/ext4_jbd2.h
fs/ext4/fast_commit.c
fs/ext4/file.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/namei.c
fs/ext4/resize.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/file.c
fs/io_uring.c
fs/namespace.c
fs/nfs/delegation.c
fs/nfs/internal.h
fs/nfs/nfs4proc.c
fs/nfs/nfs4super.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/pnfs_nfs.c
fs/nfsd/nfs3xdr.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfssvc.c
fs/nfsd/xdr4.h
fs/notify/fanotify/fanotify_user.c
fs/proc/task_mmu.c
fs/select.c
fs/zonefs/Kconfig
include/asm-generic/Kbuild
include/asm-generic/bitops/atomic.h
include/kvm/arm_pmu.h
include/linux/acpi.h
include/linux/blk-mq.h
include/linux/blkdev.h
include/linux/build_bug.h
include/linux/ceph/msgr.h
include/linux/compiler-gcc.h
include/linux/compiler_attributes.h
include/linux/compiler_types.h
include/linux/console.h
include/linux/dm-bufio.h
include/linux/intel-iommu.h
include/linux/kasan.h
include/linux/kcov.h
include/linux/kdev_t.h
include/linux/mdio-bitbang.h
include/linux/memcontrol.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/perf/arm_pmu.h
include/linux/rcupdate.h
include/linux/sizes.h
include/linux/skbuff.h
include/linux/syscalls.h
include/linux/usb/usbnet.h
include/net/cfg80211.h
include/net/inet_connection_sock.h
include/net/mac80211.h
include/net/red.h
include/net/sock.h
include/net/xdp_sock.h
include/net/xsk_buff_pool.h
include/soc/nps/common.h [deleted file]
include/soc/nps/mtm.h [deleted file]
include/trace/events/afs.h
include/trace/events/sunrpc.h
include/uapi/linux/bcache.h
include/uapi/linux/if_link.h
include/uapi/linux/kvm.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/ppp-ioctl.h
include/uapi/misc/habanalabs.h
include/xen/xenbus.h
init/main.c
kernel/bpf/bpf_inode_storage.c
kernel/bpf/bpf_task_storage.c
kernel/bpf/btf.c
kernel/bpf/cgroup.c
kernel/bpf/hashtab.c
kernel/bpf/helpers.c
kernel/bpf/syscall.c
kernel/bpf/task_iter.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup-v1.c
kernel/cgroup/cgroup.c
kernel/configs/android-recommended.config
kernel/exit.c
kernel/rcu/tasks.h
kernel/signal.c
kernel/trace/Kconfig
kernel/trace/trace_kprobe.c
kernel/workqueue.c
lib/Kconfig.debug
lib/fonts/font_ter16x32.c
lib/genalloc.c
lib/iov_iter.c
lib/raid6/Makefile
lib/zlib_dfltcc/Makefile
lib/zlib_dfltcc/dfltcc.c
lib/zlib_dfltcc/dfltcc_deflate.c
lib/zlib_dfltcc/dfltcc_inflate.c
lib/zlib_dfltcc/dfltcc_syms.c [deleted file]
mm/hugetlb.c
mm/kasan/generic.c
mm/kasan/init.c
mm/memory-failure.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/mremap.c
mm/page-writeback.c
mm/page_alloc.c
mm/process_vm_access.c
mm/slub.c
mm/vmalloc.c
mm/vmscan.c
net/8021q/vlan.c
net/bpf/test_run.c
net/can/isotp.c
net/ceph/messenger_v2.c
net/core/dev.c
net/core/devlink.c
net/core/gen_estimator.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock_reuseport.c
net/dcb/dcbnl.c
net/dsa/dsa2.c
net/dsa/master.c
net/ipv4/esp4.c
net/ipv4/fib_frontend.c
net/ipv4/gre_demux.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_rpfilter.c
net/ipv4/nexthop.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/esp6.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_output.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/sit.c
net/lapb/lapb_iface.c
net/mac80211/debugfs.c
net/mac80211/rx.c
net/mac80211/tx.c
net/mptcp/protocol.c
net/ncsi/ncsi-rsp.c
net/netfilter/ipset/ip_set_hash_gen.h
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_dynset.c
net/netfilter/xt_RATEEST.c
net/nfc/nci/core.c
net/packet/af_packet.c
net/qrtr/ns.c
net/qrtr/qrtr.c
net/qrtr/qrtr.h
net/rxrpc/input.c
net/rxrpc/key.c
net/sched/cls_flower.c
net/sched/cls_tcindex.c
net/sched/sch_api.c
net/sched/sch_choke.c
net/sched/sch_gred.c
net/sched/sch_red.c
net/sched/sch_sfq.c
net/sched/sch_taprio.c
net/smc/smc_core.c
net/smc/smc_ib.c
net/smc/smc_ism.c
net/sunrpc/addr.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcsock.c
net/tipc/link.c
net/tipc/node.c
net/wireless/Kconfig
net/wireless/reg.c
net/xdp/xsk.c
net/xdp/xsk_buff_pool.c
net/xdp/xsk_queue.h
scripts/checkpatch.pl
scripts/config
scripts/depmod.sh
scripts/gcc-plugins/Makefile
scripts/kconfig/Makefile
scripts/kconfig/mconf-cfg.sh
security/lsm_audit.c
sound/firewire/fireface/ff-transaction.c
sound/firewire/tascam/tascam-transaction.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_tegra.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/soc/amd/raven/pci-acp3x.c
sound/soc/amd/renoir/rn-pci-acp3x.c
sound/soc/atmel/Kconfig
sound/soc/codecs/Kconfig
sound/soc/codecs/max98373-i2c.c
sound/soc/codecs/max98373-sdw.c
sound/soc/codecs/max98373.c
sound/soc/codecs/max98373.h
sound/soc/codecs/rt711.c
sound/soc/fsl/imx-hdmi.c
sound/soc/intel/boards/haswell.c
sound/soc/intel/skylake/cnl-sst.c
sound/soc/meson/axg-tdm-interface.c
sound/soc/meson/axg-tdmin.c
sound/soc/qcom/lpass-cpu.c
sound/soc/qcom/lpass-platform.c
sound/soc/sh/rcar/adg.c
sound/soc/soc-dapm.c
sound/soc/sof/Kconfig
sound/usb/card.c
sound/usb/card.h
sound/usb/endpoint.c
sound/usb/endpoint.h
sound/usb/implicit.c
sound/usb/midi.c
sound/usb/quirks-table.h
sound/usb/quirks.c
sound/usb/usbaudio.h
tools/bootconfig/scripts/bconf2ftrace.sh
tools/bootconfig/scripts/ftrace2bconf.sh
tools/bpf/bpftool/net.c
tools/bpf/resolve_btfids/main.c
tools/include/linux/build_bug.h
tools/include/uapi/linux/kvm.h
tools/lib/bpf/btf.c
tools/lib/perf/tests/test-cpumap.c
tools/lib/perf/tests/test-evlist.c
tools/lib/perf/tests/test-evsel.c
tools/lib/perf/tests/test-threadmap.c
tools/perf/examples/bpf/5sec.c
tools/perf/tests/shell/stat+shadow_stat.sh
tools/perf/util/header.c
tools/perf/util/machine.c
tools/perf/util/session.c
tools/perf/util/stat-shadow.c
tools/testing/kunit/kunit_kernel.py
tools/testing/selftests/Makefile
tools/testing/selftests/arm64/fp/fpsimd-test.S
tools/testing/selftests/arm64/fp/sve-test.S
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/prog_tests/test_local_storage.c
tools/testing/selftests/bpf/progs/bprm_opts.c
tools/testing/selftests/bpf/progs/local_storage.c
tools/testing/selftests/bpf/test_maps.c
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/bpf/verifier/spill_fill.c
tools/testing/selftests/bpf/xdpxceiver.c
tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/demand_paging_test.c
tools/testing/selftests/kvm/dirty_log_perf_test.c
tools/testing/selftests/kvm/dirty_log_test.c
tools/testing/selftests/kvm/include/guest_modes.h [new file with mode: 0644]
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/include/perf_test_util.h
tools/testing/selftests/kvm/lib/guest_modes.c [new file with mode: 0644]
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/perf_test_util.c [new file with mode: 0644]
tools/testing/selftests/net/fib_nexthops.sh
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/pmtu.sh
tools/testing/selftests/net/tls.c
tools/testing/selftests/net/udpgro.sh
tools/testing/selftests/netfilter/Makefile
tools/testing/selftests/netfilter/ipip-conntrack-mtu.sh [new file with mode: 0755]
tools/testing/selftests/netfilter/nft_conntrack_helper.sh
tools/testing/selftests/vDSO/.gitignore
tools/testing/selftests/vDSO/vdso_test_correctness.c
tools/testing/selftests/vm/Makefile
tools/testing/selftests/wireguard/qemu/debug.config
virt/kvm/kvm_main.c

diff --git a/.mailmap b/.mailmap
index 632700cee55cdaeaea8d6765037150f493271c8f..b1ab0129c7d6b034b1e41355892afdebf60ebdb8 100644
--- a/.mailmap
+++ b/.mailmap
@@ -55,6 +55,8 @@ Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
 Ben Gardner <bgardner@wabtec.com>
 Ben M Cahill <ben.m.cahill@intel.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
+Björn Töpel <bjorn@kernel.org> <bjorn.topel@gmail.com>
+Björn Töpel <bjorn@kernel.org> <bjorn.topel@intel.com>
 Boris Brezillon <bbrezillon@kernel.org> <b.brezillon.dev@gmail.com>
 Boris Brezillon <bbrezillon@kernel.org> <b.brezillon@overkiz.com>
 Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@bootlin.com>
diff --git a/CREDITS b/CREDITS
index 090ed4b004a5b216c821ec820f1440c35c253402..9add7e6a4fa022ef1b06818bed57392ef3def9b5 100644
--- a/CREDITS
+++ b/CREDITS
@@ -710,6 +710,10 @@ S: Las Cuevas 2385 - Bo Guemes
 S: Las Heras, Mendoza CP 5539
 S: Argentina
 
+N: Jay Cliburn
+E: jcliburn@gmail.com
+D: ATLX Ethernet drivers
+
 N: Steven P. Cole
 E: scole@lanl.gov
 E: elenstev@mesatop.com
@@ -1284,6 +1288,10 @@ D: Major kbuild rework during the 2.5 cycle
 D: ISDN Maintainer
 S: USA
 
+N: Gerrit Renker
+E: gerrit@erg.abdn.ac.uk
+D: DCCP protocol support.
+
 N: Philip Gladstone
 E: philip@gladstonefamily.net
 D: Kernel / timekeeping stuff
@@ -2138,6 +2146,10 @@ E: seasons@falcon.sch.bme.hu
 E: seasons@makosteszta.sote.hu
 D: Original author of software suspend
 
+N: Alexey Kuznetsov
+E: kuznet@ms2.inr.ac.ru
+D: Author and maintainer of large parts of the networking stack
+
 N: Jaroslav Kysela
 E: perex@perex.cz
 W: https://www.perex.cz
@@ -2696,6 +2708,10 @@ N: Wolfgang Muees
 E: wolfgang@iksw-muees.de
 D: Auerswald USB driver
 
+N: Shrijeet Mukherjee
+E: shrijeet@gmail.com
+D: Network routing domains (VRF).
+
 N: Paul Mundt
 E: paul.mundt@gmail.com
 D: SuperH maintainer
@@ -4110,6 +4126,10 @@ S: B-1206 Jingmao Guojigongyu
 S: 16 Baliqiao Nanjie, Beijing 101100
 S: People's Repulic of China
 
+N: Aviad Yehezkel
+E: aviadye@nvidia.com
+D: Kernel TLS implementation and offload support.
+
 N: Victor Yodaiken
 E: yodaiken@fsmlabs.com
 D: RTLinux (RealTime Linux)
@@ -4167,6 +4187,10 @@ S: 1507 145th Place SE #B5
 S: Bellevue, Washington 98007
 S: USA
 
+N: Wensong Zhang
+E: wensong@linux-vs.org
+D: IP virtual server (IPVS).
+
 N: Haojian Zhuang
 E: haojian.zhuang@gmail.com
 D: MMP support
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
index 83ae3b79a6439eab57cda595ed77748f39b69ca9..a648b423ba0eb40b5be84f63f308888d8738f568 100644
--- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
@@ -473,7 +473,7 @@ read-side critical sections that follow the idle period (the oval near
 the bottom of the diagram above).
 
 Plumbing this into the full grace-period execution is described
-`below <#Forcing%20Quiescent%20States>`__.
+`below <Forcing Quiescent States_>`__.
 
 CPU-Hotplug Interface
 ^^^^^^^^^^^^^^^^^^^^^
@@ -494,7 +494,7 @@ mask to detect CPUs having gone offline since the beginning of this
 grace period.
 
 Plumbing this into the full grace-period execution is described
-`below <#Forcing%20Quiescent%20States>`__.
+`below <Forcing Quiescent States_>`__.
 
 Forcing Quiescent States
 ^^^^^^^^^^^^^^^^^^^^^^^^
@@ -532,7 +532,7 @@ from other CPUs.
 | RCU. But this diagram is complex enough as it is, so simplicity       |
 | overrode accuracy. You can think of it as poetic license, or you can  |
 | think of it as misdirection that is resolved in the                   |
-| `stitched-together diagram <#Putting%20It%20All%20Together>`__.       |
+| `stitched-together diagram <Putting It All Together_>`__.             |
 +-----------------------------------------------------------------------+
 
 Grace-Period Cleanup
@@ -596,7 +596,7 @@ maintain ordering. For example, if the callback function wakes up a task
 that runs on some other CPU, proper ordering must in place in both the
 callback function and the task being awakened. To see why this is
 important, consider the top half of the `grace-period
-cleanup <#Grace-Period%20Cleanup>`__ diagram. The callback might be
+cleanup`_ diagram. The callback might be
 running on a CPU corresponding to the leftmost leaf ``rcu_node``
 structure, and awaken a task that is to run on a CPU corresponding to
 the rightmost leaf ``rcu_node`` structure, and the grace-period kernel
diff --git a/Documentation/RCU/Design/Requirements/Requirements.rst b/Documentation/RCU/Design/Requirements/Requirements.rst
index e8c84fcc050716af8f19f326962e5c11b392fa41..d4c9a016074b3c7476819d291cb8105597d3760b 100644
--- a/Documentation/RCU/Design/Requirements/Requirements.rst
+++ b/Documentation/RCU/Design/Requirements/Requirements.rst
@@ -45,7 +45,7 @@ requirements:
 #. `Other RCU Flavors`_
 #. `Possible Future Changes`_
 
-This is followed by a `summary <#Summary>`__, however, the answers to
+This is followed by a summary_, however, the answers to
 each quick quiz immediately follows the quiz. Select the big white space
 with your mouse to see the answer.
 
@@ -1096,7 +1096,7 @@ memory barriers.
 | case, voluntary context switch) within an RCU read-side critical      |
 | section. However, sleeping locks may be used within userspace RCU     |
 | read-side critical sections, and also within Linux-kernel sleepable   |
-| RCU `(SRCU) <#Sleepable%20RCU>`__ read-side critical sections. In     |
+| RCU `(SRCU) <Sleepable RCU_>`__ read-side critical sections. In       |
 | addition, the -rt patchset turns spinlocks into a sleeping locks so   |
 | that the corresponding critical sections can be preempted, which also |
 | means that these sleeplockified spinlocks (but not other sleeping     |
@@ -1186,7 +1186,7 @@ non-preemptible (``CONFIG_PREEMPT=n``) kernels, and thus `tiny
 RCU <https://lkml.kernel.org/g/20090113221724.GA15307@linux.vnet.ibm.com>`__
 was born. Josh Triplett has since taken over the small-memory banner
 with his `Linux kernel tinification <https://tiny.wiki.kernel.org/>`__
-project, which resulted in `SRCU <#Sleepable%20RCU>`__ becoming optional
+project, which resulted in `SRCU <Sleepable RCU_>`__ becoming optional
 for those kernels not needing it.
 
 The remaining performance requirements are, for the most part,
@@ -1457,8 +1457,8 @@ will vary as the value of ``HZ`` varies, and can also be changed using
 the relevant Kconfig options and kernel boot parameters. RCU currently
 does not do much sanity checking of these parameters, so please use
 caution when changing them. Note that these forward-progress measures
-are provided only for RCU, not for `SRCU <#Sleepable%20RCU>`__ or `Tasks
-RCU <#Tasks%20RCU>`__.
+are provided only for RCU, not for `SRCU <Sleepable RCU_>`__ or `Tasks
+RCU`_.
 
 RCU takes the following steps in ``call_rcu()`` to encourage timely
 invocation of callbacks when any given non-\ ``rcu_nocbs`` CPU has
@@ -1477,8 +1477,8 @@ encouragement was provided:
 
 Again, these are default values when running at ``HZ=1000``, and can be
 overridden. Again, these forward-progress measures are provided only for
-RCU, not for `SRCU <#Sleepable%20RCU>`__ or `Tasks
-RCU <#Tasks%20RCU>`__. Even for RCU, callback-invocation forward
+RCU, not for `SRCU <Sleepable RCU_>`__ or `Tasks
+RCU`_. Even for RCU, callback-invocation forward
 progress for ``rcu_nocbs`` CPUs is much less well-developed, in part
 because workloads benefiting from ``rcu_nocbs`` CPUs tend to invoke
 ``call_rcu()`` relatively infrequently. If workloads emerge that need
@@ -1920,7 +1920,7 @@ Hotplug CPU
 
 The Linux kernel supports CPU hotplug, which means that CPUs can come
 and go. It is of course illegal to use any RCU API member from an
-offline CPU, with the exception of `SRCU <#Sleepable%20RCU>`__ read-side
+offline CPU, with the exception of `SRCU <Sleepable RCU_>`__ read-side
 critical sections. This requirement was present from day one in
 DYNIX/ptx, but on the other hand, the Linux kernel's CPU-hotplug
 implementation is “interesting.”
@@ -2177,7 +2177,7 @@ handles these states differently:
 However, RCU must be reliably informed as to whether any given CPU is
 currently in the idle loop, and, for ``NO_HZ_FULL``, also whether that
 CPU is executing in usermode, as discussed
-`earlier <#Energy%20Efficiency>`__. It also requires that the
+`earlier <Energy Efficiency_>`__. It also requires that the
 scheduling-clock interrupt be enabled when RCU needs it to be:
 
 #. If a CPU is either idle or executing in usermode, and RCU believes it
@@ -2294,7 +2294,7 @@ Performance, Scalability, Response Time, and Reliability
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Expanding on the `earlier
-discussion <#Performance%20and%20Scalability>`__, RCU is used heavily by
+discussion <Performance and Scalability_>`__, RCU is used heavily by
 hot code paths in performance-critical portions of the Linux kernel's
 networking, security, virtualization, and scheduling code paths. RCU
 must therefore use efficient implementations, especially in its
diff --git a/Documentation/admin-guide/binfmt-misc.rst b/Documentation/admin-guide/binfmt-misc.rst
index 7a864131e5ea767280f9448683dd332b71f1af10..59cd902e35497468f29f68b112f579f9b5de002b 100644
--- a/Documentation/admin-guide/binfmt-misc.rst
+++ b/Documentation/admin-guide/binfmt-misc.rst
@@ -23,7 +23,7 @@ Here is what the fields mean:
 
 - ``name``
    is an identifier string. A new /proc file will be created with this
-   ``name below /proc/sys/fs/binfmt_misc``; cannot contain slashes ``/`` for
+   name below ``/proc/sys/fs/binfmt_misc``; cannot contain slashes ``/`` for
    obvious reasons.
 - ``type``
    is the type of recognition. Give ``M`` for magic and ``E`` for extension.
@@ -83,7 +83,7 @@ Here is what the fields mean:
       ``F`` - fix binary
             The usual behaviour of binfmt_misc is to spawn the
            binary lazily when the misc format file is invoked.  However,
-           this doesn``t work very well in the face of mount namespaces and
+           this doesn't work very well in the face of mount namespaces and
            changeroots, so the ``F`` mode opens the binary as soon as the
            emulation is installed and uses the opened image to spawn the
            emulator, meaning it is always available once installed,
diff --git a/Documentation/admin-guide/bootconfig.rst b/Documentation/admin-guide/bootconfig.rst
index 9b90efcc3a35e923b37fec54879e132adf8ae3ca..452b7dcd7f6be38ec68f1a234b6eb650fbd3778d 100644
--- a/Documentation/admin-guide/bootconfig.rst
+++ b/Documentation/admin-guide/bootconfig.rst
@@ -154,7 +154,7 @@ get the boot configuration data.
 Because of this "piggyback" method, there is no need to change or
 update the boot loader and the kernel image itself as long as the boot
 loader passes the correct initrd file size. If by any chance, the boot
-loader passes a longer size, the kernel feils to find the bootconfig data.
+loader passes a longer size, the kernel fails to find the bootconfig data.
 
 To do this operation, Linux kernel provides "bootconfig" command under
 tools/bootconfig, which allows admin to apply or delete the config file
diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst
index 06fb1b4aa849cc8e012d6c3060d89f08cb6fe522..682ab28b5c94b3d292d085173adcb1f2ebce6194 100644
--- a/Documentation/admin-guide/kernel-parameters.rst
+++ b/Documentation/admin-guide/kernel-parameters.rst
@@ -3,8 +3,8 @@
 The kernel's command-line parameters
 ====================================
 
-The following is a consolidated list of the kernel parameters as
-implemented by the __setup(), core_param() and module_param() macros
+The following is a consolidated list of the kernel parameters as implemented
+by the __setup(), early_param(), core_param() and module_param() macros
 and sorted into English Dictionary order (defined as ignoring all
 punctuation and sorting digits before letters in a case insensitive
 manner), and with descriptions where known.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index c722ec19cd00485ceb9ba4dfce2cf9c5d2deb1d0..a10b545c2070a54455c3e7181cc43135dc68f611 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
 
        ftrace_filter=[function-list]
                        [FTRACE] Limit the functions traced by the function
-                       tracer at boot up. function-list is a comma separated
+                       tracer at boot up. function-list is a comma-separated
                        list of functions. This list can be changed at run
                        time by the set_ftrace_filter file in the debugfs
                        tracing directory.
        ftrace_graph_filter=[function-list]
                        [FTRACE] Limit the top level callers functions traced
                        by the function graph tracer at boot up.
-                       function-list is a comma separated list of functions
+                       function-list is a comma-separated list of functions
                        that can be changed at run time by the
                        set_graph_function file in the debugfs tracing directory.
 
        ftrace_graph_notrace=[function-list]
                        [FTRACE] Do not trace from the functions specified in
-                       function-list.  This list is a comma separated list of
+                       function-list.  This list is a comma-separated list of
                        functions that can be changed at run time by the
                        set_graph_notrace file in the debugfs tracing directory.
 
                        when set.
                        Format: <int>
 
-       libata.force=   [LIBATA] Force configurations.  The format is comma
+       libata.force=   [LIBATA] Force configurations.  The format is comma-
                        separated list of "[ID:]VAL" where ID is
                        PORT[.DEVICE].  PORT and DEVICE are decimal numbers
                        matching port, link or device.  Basically, it matches
 
        stacktrace_filter=[function-list]
                        [FTRACE] Limit the functions that the stack tracer
-                       will trace at boot up. function-list is a comma separated
+                       will trace at boot up. function-list is a comma-separated
                        list of functions. This list can be changed at run
                        time by the stack_trace_filter file in the debugfs
                        tracing directory. Note, this enables stack tracing
        trace_event=[event-list]
                        [FTRACE] Set and start specified trace events in order
                        to facilitate early boot debugging. The event-list is a
-                       comma separated list of trace events to enable. See
+                       comma-separated list of trace events to enable. See
                        also Documentation/trace/events.rst
 
        trace_options=[option-list]
                        This option is obsoleted by the "nopv" option, which
                        has equivalent effect for XEN platform.
 
+       xen_no_vector_callback
+                       [KNL,X86,XEN] Disable the vector callback for Xen
+                       event channel interrupts.
+
        xen_scrub_pages=        [XEN]
                        Boolean option to control scrubbing pages before giving them back
                        to Xen, for use by other domains. Can be also changed at runtime
diff --git a/Documentation/admin-guide/mm/concepts.rst b/Documentation/admin-guide/mm/concepts.rst
index fa0974fbeae7c770d125ff72bc2d67b189ddcbaa..b966fcff993b26c9d2854b155f3c7811f851447b 100644
--- a/Documentation/admin-guide/mm/concepts.rst
+++ b/Documentation/admin-guide/mm/concepts.rst
@@ -184,7 +184,7 @@ pages either asynchronously or synchronously, depending on the state
 of the system. When the system is not loaded, most of the memory is free
 and allocation requests will be satisfied immediately from the free
 pages supply. As the load increases, the amount of the free pages goes
-down and when it reaches a certain threshold (high watermark), an
+down and when it reaches a certain threshold (low watermark), an
 allocation request will awaken the ``kswapd`` daemon. It will
 asynchronously scan memory pages and either just free them if the data
 they contain is available elsewhere, or evict to the backing storage
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index 69171b1799f21464322ff150a41e3b39e4978b2b..f1c9d20bd42ddb1854fff0d7ee4ba5b388cd02d9 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -53,7 +53,6 @@ How Linux keeps everything from happening at the same time.  See
 .. toctree::
    :maxdepth: 1
 
-   atomic_ops
    refcount-vs-atomic
    irq/index
    local_ops
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
index b15f68c499cb2396a1a8626049ff82d5b6744a45..df29d59d13a8dc4671dcbe4144ea6180cc1991d3 100644
--- a/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
+++ b/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/dma/ti/k3-bcdma.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments K3 DMSS BCDMA Device Tree Bindings
 
 maintainers:
-  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+  - Peter Ujfalusi <peter.ujfalusi@gmail.com>
 
 description: |
   The Block Copy DMA (BCDMA) is intended to perform similar functions as the TR
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml
index b13ab60cd740f52ffaddc37cbfc192fa10911822..ea19d12a9337e8e385050858bab4e1387eeaf613 100644
--- a/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml
+++ b/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/dma/ti/k3-pktdma.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments K3 DMSS PKTDMA Device Tree Bindings
 
 maintainers:
-  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+  - Peter Ujfalusi <peter.ujfalusi@gmail.com>
 
 description: |
   The Packet DMA (PKTDMA) is intended to perform similar functions as the packet
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
index 9a87fd9041eba1efb7333d86c8ebf4a1fa92cf40..6a09bbf83d4629215b62fba92c4eeaa2520b1e9f 100644
--- a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
+++ b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2019 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/dma/ti/k3-udma.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments K3 NAVSS Unified DMA Device Tree Bindings
 
 maintainers:
-  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+  - Peter Ujfalusi <peter.ujfalusi@gmail.com>
 
 description: |
   The UDMA-P is intended to perform similar (but significantly upgraded)
diff --git a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
index 244befb6402aa8b4cd28641e9dc8236547bf1b62..de9dd574a2f954a3ba3f59406db98952c0acfcb2 100644
--- a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
@@ -163,6 +163,7 @@ allOf:
             enum:
               - renesas,etheravb-r8a774a1
               - renesas,etheravb-r8a774b1
+              - renesas,etheravb-r8a774e1
               - renesas,etheravb-r8a7795
               - renesas,etheravb-r8a7796
               - renesas,etheravb-r8a77961
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index b2f6083f556af0d6b789d8b340a8699152bac912..dfbf5fe4547ab611a7004ee2baeb4914d93ce61e 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -161,7 +161,8 @@ properties:
             * snps,route-dcbcp, DCB Control Packets
             * snps,route-up, Untagged Packets
             * snps,route-multi-broad, Multicast & Broadcast Packets
-          * snps,priority, RX queue priority (Range 0x0 to 0xF)
+          * snps,priority, bitmask of the tagged frames priorities assigned to
+            the queue
 
   snps,mtl-tx-config:
     $ref: /schemas/types.yaml#/definitions/phandle
@@ -188,7 +189,10 @@ properties:
             * snps,idle_slope, unlock on WoL
             * snps,high_credit, max write outstanding req. limit
             * snps,low_credit, max read outstanding req. limit
-          * snps,priority, TX queue priority (Range 0x0 to 0xF)
+          * snps,priority, bitmask of the priorities assigned to the queue.
+            When a PFC frame is received with priorities matching the bitmask,
+            the queue is blocked from transmitting for the pause time specified
+            in the PFC frame.
 
   snps,reset-gpio:
     deprecated: true
diff --git a/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml b/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
index a6c259ce97850c6dc196b40d38f6a81dfa63bf9a..956156fe52a3eb6040a44d8a4d393cf25b857484 100644
--- a/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
@@ -19,7 +19,9 @@ description: |
 properties:
   compatible:
     enum:
-      - nxp,pf8x00
+      - nxp,pf8100
+      - nxp,pf8121a
+      - nxp,pf8200
 
   reg:
     maxItems: 1
@@ -118,7 +120,7 @@ examples:
         #size-cells = <0>;
 
         pmic@8 {
-            compatible = "nxp,pf8x00";
+            compatible = "nxp,pf8100";
             reg = <0x08>;
 
             regulators {
diff --git a/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
index b8f0b7809c021df7fad746b89b4dc35a5d651a3f..7d462b899473ad726d184c4512d80804b077d180 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
@@ -44,6 +44,7 @@ First Level Nodes - PMIC
        Definition: Must be one of below:
                    "qcom,pm8005-rpmh-regulators"
                    "qcom,pm8009-rpmh-regulators"
+                   "qcom,pm8009-1-rpmh-regulators"
                    "qcom,pm8150-rpmh-regulators"
                    "qcom,pm8150l-rpmh-regulators"
                    "qcom,pm8350-rpmh-regulators"
diff --git a/Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml b/Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml
index 805da4d6a88ed53c2592e9c4cf0adf42f8320a15..ec06789b21dfc4ccd1b90cf96ba56374cd8848c0 100644
--- a/Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/sound/ti,j721e-cpb-audio.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments J721e Common Processor Board Audio Support
 
 maintainers:
-  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+  - Peter Ujfalusi <peter.ujfalusi@gmail.com>
 
 description: |
   The audio support on the board is using pcm3168a codec connected to McASP10
diff --git a/Documentation/devicetree/bindings/sound/ti,j721e-cpb-ivi-audio.yaml b/Documentation/devicetree/bindings/sound/ti,j721e-cpb-ivi-audio.yaml
index bb780f621628788c8983d706283ddf1da8a7217a..ee9f960de36b7c9eb083680d589b518272a0522d 100644
--- a/Documentation/devicetree/bindings/sound/ti,j721e-cpb-ivi-audio.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,j721e-cpb-ivi-audio.yaml
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/sound/ti,j721e-cpb-ivi-audio.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments J721e Common Processor Board Audio Support
 
 maintainers:
-  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+  - Peter Ujfalusi <peter.ujfalusi@gmail.com>
 
 description: |
   The Infotainment board plugs into the Common Processor Board, the support of the
diff --git a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
index 388245b91a55b1c88640b772dd715d8e3809dca5..148b3fb4ceaf85f69a57883efc0962f434dce008 100644
--- a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
@@ -11,8 +11,12 @@ maintainers:
 
 properties:
   compatible:
-    items:
+    oneOf:
       - const: ti,j721e-usb
+      - const: ti,am64-usb
+      - items:
+          - const: ti,j721e-usb
+          - const: ti,am64-usb
 
   reg:
     description: module registers
diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst
index 2fb2ff297d69cb3121c75df5e10be0f850019b72..36ac2166ad675ee8e8881871700d04525d77576a 100644
--- a/Documentation/doc-guide/sphinx.rst
+++ b/Documentation/doc-guide/sphinx.rst
@@ -48,12 +48,12 @@ or ``virtualenv``, depending on how your distribution packaged Python 3.
       those versions, you should run ``pip install 'docutils==0.12'``.
 
    #) It is recommended to use the RTD theme for html output. Depending
-      on the Sphinx version, it should be installed  in separate,
+      on the Sphinx version, it should be installed separately,
       with ``pip install sphinx_rtd_theme``.
 
-   #) Some ReST pages contain math expressions. Due to the way Sphinx work,
+   #) Some ReST pages contain math expressions. Due to the way Sphinx works,
       those expressions are written using LaTeX notation. It needs texlive
-      installed with amdfonts and amsmath in order to evaluate them.
+      installed with amsfonts and amsmath in order to evaluate them.
 
 In summary, if you want to install Sphinx version 1.7.9, you should do::
 
@@ -128,7 +128,7 @@ Sphinx Build
 ============
 
 The usual way to generate the documentation is to run ``make htmldocs`` or
-``make pdfdocs``. There are also other formats available, see the documentation
+``make pdfdocs``. There are also other formats available: see the documentation
 section of ``make help``. The generated documentation is placed in
 format-specific subdirectories under ``Documentation/output``.
 
@@ -303,17 +303,17 @@ and *targets* (e.g. a ref to ``:ref:`last row <last row>``` / :ref:`last row
         - head col 3
         - head col 4
 
-      * - column 1
+      * - row 1
         - field 1.1
         - field 1.2 with autospan
 
-      * - column 2
+      * - row 2
         - field 2.1
         - :rspan:`1` :cspan:`1` field 2.2 - 3.3
 
       * .. _`last row`:
 
-        - column 3
+        - row 3
 
 Rendered as:
 
@@ -325,17 +325,17 @@ Rendered as:
         - head col 3
         - head col 4
 
-      * - column 1
+      * - row 1
         - field 1.1
         - field 1.2 with autospan
 
-      * - column 2
+      * - row 2
         - field 2.1
         - :rspan:`1` :cspan:`1` field 2.2 - 3.3
 
       * .. _`last row`:
 
-        - column 3
+        - row 3
 
 Cross-referencing
 -----------------
@@ -361,7 +361,7 @@ Figures & Images
 
 If you want to add an image, you should use the ``kernel-figure`` and
 ``kernel-image`` directives. E.g. to insert a figure with a scalable
-image format use SVG (:ref:`svg_image_example`)::
+image format, use SVG (:ref:`svg_image_example`)::
 
     .. kernel-figure::  svg_image.svg
        :alt:    simple SVG image
@@ -375,7 +375,7 @@ image format use SVG (:ref:`svg_image_example`)::
 
    SVG image example
 
-The kernel figure (and image) directive support **DOT** formatted files, see
+The kernel figure (and image) directive supports **DOT** formatted files, see
 
 * DOT: http://graphviz.org/pdf/dotguide.pdf
 * Graphviz: http://www.graphviz.org/content/dot-language
@@ -394,7 +394,7 @@ A simple example (:ref:`hello_dot_file`)::
 
    DOT's hello world example
 
-Embed *render* markups (or languages) like Graphviz's **DOT** is provided by the
+Embedded *render* markups (or languages) like Graphviz's **DOT** are provided by the
 ``kernel-render`` directives.::
 
   .. kernel-render:: DOT
@@ -406,7 +406,7 @@ Embed *render* markups (or languages) like Graphviz's **DOT** is provided by the
      }
 
 How this will be rendered depends on the installed tools. If Graphviz is
-installed, you will see an vector image. If not the raw markup is inserted as
+installed, you will see a vector image. If not, the raw markup is inserted as
 *literal-block* (:ref:`hello_dot_render`).
 
 .. _hello_dot_render:
@@ -421,8 +421,8 @@ installed, you will see an vector image. If not the raw markup is inserted as
 
 The *render* directive has all the options known from the *figure* directive,
 plus option ``caption``.  If ``caption`` has a value, a *figure* node is
-inserted. If not, a *image* node is inserted. A ``caption`` is also needed, if
-you want to refer it (:ref:`hello_svg_render`).
+inserted. If not, an *image* node is inserted. A ``caption`` is also needed, if
+you want to refer to it (:ref:`hello_svg_render`).
 
 Embedded **SVG**::
 
index e588bccf5158370fb03dfe4db06b20ee93114108..c042176e17078f5a72eba00b501d665475566c76 100644 (file)
@@ -50,8 +50,8 @@ The following files belong to it:
   0x00000010        Memory Uncorrectable non-fatal
   0x00000020        Memory Uncorrectable fatal
   0x00000040        PCI Express Correctable
-  0x00000080        PCI Express Uncorrectable fatal
-  0x00000100        PCI Express Uncorrectable non-fatal
+  0x00000080        PCI Express Uncorrectable non-fatal
+  0x00000100        PCI Express Uncorrectable fatal
   0x00000200        Platform Correctable
   0x00000400        Platform Uncorrectable non-fatal
   0x00000800        Platform Uncorrectable fatal
index 922b3c8db666e17cbd5cdf60164132790bd755f8..749f518389c38a50e7337cb1263f0a3e20a3d9ca 100644 (file)
@@ -1,7 +1,7 @@
 .. SPDX-License-Identifier: GPL-2.0-or-later
 
 Kernel driver sbtsi_temp
-==================
+========================
 
 Supported hardware:
 
index d36768cf12506de3391bd883116da78761ec6983..9f6a1188195136089fda768bf2ab88cc60c5dae4 100644 (file)
@@ -598,7 +598,7 @@ more details, with real examples.
        explicitly added to $(targets).
 
        Assignments to $(targets) are without $(obj)/ prefix. if_changed may be
-       used in conjunction with custom rules as defined in "3.9 Custom Rules".
+       used in conjunction with custom rules as defined in "3.11 Custom Rules".
 
        Note: It is a typical mistake to forget the FORCE prerequisite.
        Another common pitfall is that whitespace is sometimes significant; for
index 6ed806e6061bba317d67475c67586b4eeab7387c..c3448929a824ab2373cbc64cb894ee70177f3ac8 100644 (file)
@@ -118,11 +118,11 @@ spinlock, but you may block holding a mutex. If you can't lock a mutex,
 your task will suspend itself, and be woken up when the mutex is
 released. This means the CPU can do something else while you are
 waiting. There are many cases when you simply can't sleep (see
-`What Functions Are Safe To Call From Interrupts? <#sleeping-things>`__),
+`What Functions Are Safe To Call From Interrupts?`_),
 and so have to use a spinlock instead.
 
 Neither type of lock is recursive: see
-`Deadlock: Simple and Advanced <#deadlock>`__.
+`Deadlock: Simple and Advanced`_.
 
 Locks and Uniprocessor Kernels
 ------------------------------
@@ -179,7 +179,7 @@ perfect world).
 
 Note that you can also use spin_lock_irq() or
 spin_lock_irqsave() here, which stop hardware interrupts
-as well: see `Hard IRQ Context <#hard-irq-context>`__.
+as well: see `Hard IRQ Context`_.
 
 This works perfectly for UP as well: the spin lock vanishes, and this
 macro simply becomes local_bh_disable()
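For illustration, a minimal sketch of the locking pattern this hunk describes (the lock and counter are hypothetical; only the spinlock API itself is the kernel's)::

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);         /* hypothetical lock */
    static unsigned long my_shared_count;    /* hypothetical shared data */

    /* Safe against both other CPUs and local hard IRQs: saves the
     * current IRQ state, disables interrupts, takes the lock, and
     * restores everything on unlock. */
    static void my_update(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&my_lock, flags);
            my_shared_count++;
            spin_unlock_irqrestore(&my_lock, flags);
    }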
@@ -230,7 +230,7 @@ The Same Softirq
 ~~~~~~~~~~~~~~~~
 
 The same softirq can run on the other CPUs: you can use a per-CPU array
-(see `Per-CPU Data <#per-cpu-data>`__) for better performance. If you're
+(see `Per-CPU Data`_) for better performance. If you're
 going so far as to use a softirq, you probably care about scalable
 performance enough to justify the extra complexity.
 
index d3fcf536d14e1bfd92cf67fe46b61d793a269729..61e850460e18f41b64d3c51e0ac396c3c9d69865 100644 (file)
@@ -164,46 +164,56 @@ Devlink health reporters
 
 NPA Reporters
 -------------
-The NPA reporters are responsible for reporting and recovering the following group of errors
+The NPA reporters are responsible for reporting and recovering the following group of errors:
+
 1. GENERAL events
+
    - Error due to operation of unmapped PF.
    - Error due to disabled alloc/free for other HW blocks (NIX, SSO, TIM, DPI and AURA).
+
 2. ERROR events
+
    - Fault due to NPA_AQ_INST_S read or NPA_AQ_RES_S write.
    - AQ Doorbell Error.
+
 3. RAS events
+
    - RAS Error Reporting for NPA_AQ_INST_S/NPA_AQ_RES_S.
+
 4. RVU events
+
    - Error due to unmapped slot.
 
-Sample Output
--------------
-~# devlink health
-pci/0002:01:00.0:
-  reporter hw_npa_intr
-      state healthy error 2872 recover 2872 last_dump_date 2020-12-10 last_dump_time 09:39:09 grace_period 0 auto_recover true auto_dump true
-  reporter hw_npa_gen
-      state healthy error 2872 recover 2872 last_dump_date 2020-12-11 last_dump_time 04:43:04 grace_period 0 auto_recover true auto_dump true
-  reporter hw_npa_err
-      state healthy error 2871 recover 2871 last_dump_date 2020-12-10 last_dump_time 09:39:17 grace_period 0 auto_recover true auto_dump true
-   reporter hw_npa_ras
-      state healthy error 0 recover 0 last_dump_date 2020-12-10 last_dump_time 09:32:40 grace_period 0 auto_recover true auto_dump true
+Sample Output::
+
+       ~# devlink health
+       pci/0002:01:00.0:
+         reporter hw_npa_intr
+             state healthy error 2872 recover 2872 last_dump_date 2020-12-10 last_dump_time 09:39:09 grace_period 0 auto_recover true auto_dump true
+         reporter hw_npa_gen
+             state healthy error 2872 recover 2872 last_dump_date 2020-12-11 last_dump_time 04:43:04 grace_period 0 auto_recover true auto_dump true
+         reporter hw_npa_err
+             state healthy error 2871 recover 2871 last_dump_date 2020-12-10 last_dump_time 09:39:17 grace_period 0 auto_recover true auto_dump true
+          reporter hw_npa_ras
+             state healthy error 0 recover 0 last_dump_date 2020-12-10 last_dump_time 09:32:40 grace_period 0 auto_recover true auto_dump true
 
 Each reporter dumps the
+
  - Error Type
  - Error Register value
  - Reason in words
 
-For eg:
-~# devlink health dump show  pci/0002:01:00.0 reporter hw_npa_gen
- NPA_AF_GENERAL:
-         NPA General Interrupt Reg : 1
-         NIX0: free disabled RX
-~# devlink health dump show  pci/0002:01:00.0 reporter hw_npa_intr
- NPA_AF_RVU:
-         NPA RVU Interrupt Reg : 1
-         Unmap Slot Error
-~# devlink health dump show  pci/0002:01:00.0 reporter hw_npa_err
- NPA_AF_ERR:
-        NPA Error Interrupt Reg : 4096
-        AQ Doorbell Error
+For example::
+
+       ~# devlink health dump show  pci/0002:01:00.0 reporter hw_npa_gen
+        NPA_AF_GENERAL:
+                NPA General Interrupt Reg : 1
+                NIX0: free disabled RX
+       ~# devlink health dump show  pci/0002:01:00.0 reporter hw_npa_intr
+        NPA_AF_RVU:
+                NPA RVU Interrupt Reg : 1
+                Unmap Slot Error
+       ~# devlink health dump show  pci/0002:01:00.0 reporter hw_npa_err
+        NPA_AF_ERR:
+               NPA Error Interrupt Reg : 4096
+               AQ Doorbell Error
index 4b9ed5874d5ad183d827b698d68c5b4146674a7a..ae2ae37cd921627f9b92c9c7c76ca20eae6bfdcb 100644 (file)
@@ -6,9 +6,9 @@
 netdev FAQ
 ==========
 
-Q: What is netdev?
-------------------
-A: It is a mailing list for all network-related Linux stuff.  This
+What is netdev?
+---------------
+It is a mailing list for all network-related Linux stuff.  This
 includes anything found under net/ (i.e. core code like IPv6) and
 drivers/net (i.e. hardware specific drivers) in the Linux source tree.
 
@@ -25,9 +25,9 @@ Aside from subsystems like that mentioned above, all network-related
 Linux development (i.e. RFC, review, comments, etc.) takes place on
 netdev.
 
-Q: How do the changes posted to netdev make their way into Linux?
------------------------------------------------------------------
-A: There are always two trees (git repositories) in play.  Both are
+How do the changes posted to netdev make their way into Linux?
+--------------------------------------------------------------
+There are always two trees (git repositories) in play.  Both are
 driven by David Miller, the main network maintainer.  There is the
 ``net`` tree, and the ``net-next`` tree.  As you can probably guess from
 the names, the ``net`` tree is for fixes to existing code already in the
@@ -37,9 +37,9 @@ for the future release.  You can find the trees here:
 - https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
 - https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 
-Q: How often do changes from these trees make it to the mainline Linus tree?
-----------------------------------------------------------------------------
-A: To understand this, you need to know a bit of background information on
+How often do changes from these trees make it to the mainline Linus tree?
+-------------------------------------------------------------------------
+To understand this, you need to know a bit of background information on
 the cadence of Linux development.  Each new release starts off with a
 two week "merge window" where the main maintainers feed their new stuff
 to Linus for merging into the mainline tree.  After the two weeks, the
@@ -81,7 +81,8 @@ focus for ``net`` is on stabilization and bug fixes.
 
 Finally, the vX.Y gets released, and the whole cycle starts over.
 
-Q: So where are we now in this cycle?
+So where are we now in this cycle?
+----------------------------------
 
 Load the mainline (Linus) page here:
 
@@ -91,9 +92,9 @@ and note the top of the "tags" section.  If it is rc1, it is early in
 the dev cycle.  If it was tagged rc7 a week ago, then a release is
 probably imminent.
 
-Q: How do I indicate which tree (net vs. net-next) my patch should be in?
--------------------------------------------------------------------------
-A: Firstly, think whether you have a bug fix or new "next-like" content.
+How do I indicate which tree (net vs. net-next) my patch should be in?
+----------------------------------------------------------------------
+Firstly, think whether you have a bug fix or new "next-like" content.
 Then once decided, assuming that you use git, use the prefix flag, i.e.
 ::
 
@@ -105,48 +106,45 @@ in the above is just the subject text of the outgoing e-mail, and you
 can manually change it yourself with whatever MUA you are comfortable
 with.
 
-Q: I sent a patch and I'm wondering what happened to it?
---------------------------------------------------------
-Q: How can I tell whether it got merged?
-A: Start by looking at the main patchworks queue for netdev:
+I sent a patch and I'm wondering what happened to it - how can I tell whether it got merged?
+--------------------------------------------------------------------------------------------
+Start by looking at the main patchworks queue for netdev:
 
   https://patchwork.kernel.org/project/netdevbpf/list/
 
 The "State" field will tell you exactly where things are at with your
 patch.
 
-Q: The above only says "Under Review".  How can I find out more?
-----------------------------------------------------------------
-A: Generally speaking, the patches get triaged quickly (in less than
+The above only says "Under Review".  How can I find out more?
+-------------------------------------------------------------
+Generally speaking, the patches get triaged quickly (in less than
 48h).  So be patient.  Asking the maintainer for status updates on your
 patch is a good way to ensure your patch is ignored or pushed to the
 bottom of the priority list.
 
-Q: I submitted multiple versions of the patch series
-----------------------------------------------------
-Q: should I directly update patchwork for the previous versions of these
-patch series?
-A: No, please don't interfere with the patch status on patchwork, leave
+I submitted multiple versions of the patch series. Should I directly update patchwork for the previous versions of these patch series?
+--------------------------------------------------------------------------------------------------------------------------------------
+No, please don't interfere with the patch status on patchwork, leave
 it to the maintainer to figure out what is the most recent and current
 version that should be applied. If there is any doubt, the maintainer
 will reply and ask what should be done.
 
-Q: I made changes to only a few patches in a patch series should I resend only those changed?
----------------------------------------------------------------------------------------------
-A: No, please resend the entire patch series and make sure you do number your
+I made changes to only a few patches in a patch series. Should I resend only those changed?
+--------------------------------------------------------------------------------------------
+No, please resend the entire patch series and make sure you do number your
 patches such that it is clear this is the latest and greatest set of patches
 that can be applied.
 
-Q: I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted, what should I do?
--------------------------------------------------------------------------------------------------------------------------------------------
-A: There is no revert possible, once it is pushed out, it stays like that.
+I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted. What should I do?
+----------------------------------------------------------------------------------------------------------------------------------------
+There is no revert possible; once it is pushed out, it stays like that.
 Please send incremental versions on top of what has been merged in order to fix
 the patches the way they would look like if your latest patch series was to be
 merged.
 
-Q: How can I tell what patches are queued up for backporting to the various stable releases?
---------------------------------------------------------------------------------------------
-A: Normally Greg Kroah-Hartman collects stable commits himself, but for
+How can I tell what patches are queued up for backporting to the various stable releases?
+-----------------------------------------------------------------------------------------
+Normally Greg Kroah-Hartman collects stable commits himself, but for
 networking, Dave collects up patches he deems critical for the
 networking subsystem, and then hands them off to Greg.
 
@@ -169,11 +167,9 @@ simply clone the repo, and then git grep the mainline commit ID, e.g.
   releases/3.9.8/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
   stable/stable-queue$
 
-Q: I see a network patch and I think it should be backported to stable.
------------------------------------------------------------------------
-Q: Should I request it via stable@vger.kernel.org like the references in
-the kernel's Documentation/process/stable-kernel-rules.rst file say?
-A: No, not for networking.  Check the stable queues as per above first
+I see a network patch and I think it should be backported to stable. Should I request it via stable@vger.kernel.org like the references in the kernel's Documentation/process/stable-kernel-rules.rst file say?
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+No, not for networking.  Check the stable queues as per above first
 to see if it is already queued.  If not, then send a mail to netdev,
 listing the upstream commit ID and why you think it should be a stable
 candidate.
@@ -190,11 +186,9 @@ mainline, the better the odds that it is an OK candidate for stable.  So
 scrambling to request a commit be added the day after it appears should
 be avoided.
 
-Q: I have created a network patch and I think it should be backported to stable.
---------------------------------------------------------------------------------
-Q: Should I add a Cc: stable@vger.kernel.org like the references in the
-kernel's Documentation/ directory say?
-A: No.  See above answer.  In short, if you think it really belongs in
+I have created a network patch and I think it should be backported to stable. Should I add a Cc: stable@vger.kernel.org like the references in the kernel's Documentation/ directory say?
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+No.  See above answer.  In short, if you think it really belongs in
 stable, then ensure you write a decent commit log that describes who
 gets impacted by the bug fix and how it manifests itself, and when the
 bug was introduced.  If you do that properly, then the commit will get
@@ -207,18 +201,18 @@ marker line as described in
 :ref:`Documentation/process/submitting-patches.rst <the_canonical_patch_format>`
 to temporarily embed that information into the patch that you send.
 
-Q: Are all networking bug fixes backported to all stable releases?
-------------------------------------------------------------------
-A: Due to capacity, Dave could only take care of the backports for the
+Are all networking bug fixes backported to all stable releases?
+---------------------------------------------------------------
+Due to capacity, Dave could only take care of the backports for the
 last two stable releases. For earlier stable releases, each stable
 branch maintainer is supposed to take care of them. If you find any
 patch is missing from an earlier stable branch, please notify
 stable@vger.kernel.org with either a commit ID or a formal patch
 backported, and CC Dave and other relevant networking developers.
 
-Q: Is the comment style convention different for the networking content?
-------------------------------------------------------------------------
-A: Yes, in a largely trivial way.  Instead of this::
+Is the comment style convention different for the networking content?
+---------------------------------------------------------------------
+Yes, in a largely trivial way.  Instead of this::
 
   /*
    * foobar blah blah blah
@@ -231,32 +225,30 @@ it is requested that you make it look like this::
    * another line of text
    */
 
-Q: I am working in existing code that has the former comment style and not the latter.
---------------------------------------------------------------------------------------
-Q: Should I submit new code in the former style or the latter?
-A: Make it the latter style, so that eventually all code in the domain
+I am working in existing code that has the former comment style and not the latter. Should I submit new code in the former style or the latter?
+-----------------------------------------------------------------------------------------------------------------------------------------------
+Make it the latter style, so that eventually all code in the domain
 of netdev is of this format.
 
-Q: I found a bug that might have possible security implications or similar.
----------------------------------------------------------------------------
-Q: Should I mail the main netdev maintainer off-list?**
-A: No. The current netdev maintainer has consistently requested that
+I found a bug that might have possible security implications or similar. Should I mail the main netdev maintainer off-list?
+---------------------------------------------------------------------------------------------------------------------------
+No. The current netdev maintainer has consistently requested that
 people use the mailing lists and not reach out directly.  If you aren't
 OK with that, then perhaps consider mailing security@kernel.org or
 reading about http://oss-security.openwall.org/wiki/mailing-lists/distros
 as possible alternative mechanisms.
 
-Q: What level of testing is expected before I submit my change?
----------------------------------------------------------------
-A: If your changes are against ``net-next``, the expectation is that you
+What level of testing is expected before I submit my change?
+------------------------------------------------------------
+If your changes are against ``net-next``, the expectation is that you
 have tested by layering your changes on top of ``net-next``.  Ideally
 you will have done run-time testing specific to your change, but at a
 minimum, your changes should survive an ``allyesconfig`` and an
 ``allmodconfig`` build without new warnings or failures.
 
-Q: How do I post corresponding changes to user space components?
-----------------------------------------------------------------
-A: User space code exercising kernel features should be posted
+How do I post corresponding changes to user space components?
+-------------------------------------------------------------
+User space code exercising kernel features should be posted
 alongside kernel patches. This gives reviewers a chance to see
 how any new interface is used and how well it works.
 
@@ -280,9 +272,9 @@ to the mailing list, e.g.::
 Posting as one thread is discouraged because it confuses patchwork
 (as of patchwork 2.2.2).
 
-Q: Any other tips to help ensure my net/net-next patch gets OK'd?
------------------------------------------------------------------
-A: Attention to detail.  Re-read your own work as if you were the
+Any other tips to help ensure my net/net-next patch gets OK'd?
+--------------------------------------------------------------
+Attention to detail.  Re-read your own work as if you were the
 reviewer.  You can start with using ``checkpatch.pl``, perhaps even with
 the ``--strict`` flag.  But do not be mindlessly robotic in doing so.
 If your change is a bug fix, make sure your commit log indicates the
index 5a85fcc80c7653b6e9c1d9c4741dfc112c4d486f..17bdcb746dcf530af859440aa04e78d4fb1226f4 100644 (file)
@@ -10,18 +10,177 @@ Introduction
 The following is a random collection of documentation regarding
 network devices.
 
-struct net_device allocation rules
-==================================
+struct net_device lifetime rules
+================================
 Network device structures need to persist even after module is unloaded and
 must be allocated with alloc_netdev_mqs() and friends.
 If device has registered successfully, it will be freed on last use
-by free_netdev(). This is required to handle the pathologic case cleanly
-(example: rmmod mydriver </sys/class/net/myeth/mtu )
+by free_netdev(). This is required to handle the pathological case cleanly
+(example: ``rmmod mydriver </sys/class/net/myeth/mtu``)
 
-alloc_netdev_mqs()/alloc_netdev() reserve extra space for driver
+alloc_netdev_mqs() / alloc_netdev() reserve extra space for driver
 private data which gets freed when the network device is freed. If
 separately allocated data is attached to the network device
-(netdev_priv(dev)) then it is up to the module exit handler to free that.
+(netdev_priv()) then it is up to the module exit handler to free that.
+
+There are two groups of APIs for registering struct net_device.
+The first group can be used in normal contexts where ``rtnl_lock`` is not
+already held: register_netdev(), unregister_netdev().
+The second group can be used when ``rtnl_lock`` is already held:
+register_netdevice(), unregister_netdevice(), free_netdev().
+
+Simple drivers
+--------------
+
+Most drivers (especially device drivers) handle the lifetime of struct net_device
+in contexts where ``rtnl_lock`` is not held (e.g. driver probe and remove paths).
+
+In that case the struct net_device registration is done using
+the register_netdev() and unregister_netdev() functions:
+
+.. code-block:: c
+
+  int probe()
+  {
+    struct my_device_priv *priv;
+    int err;
+
+    dev = alloc_netdev_mqs(...);
+    if (!dev)
+      return -ENOMEM;
+    priv = netdev_priv(dev);
+
+    /* ... do all device setup before calling register_netdev() ...
+     */
+
+    err = register_netdev(dev);
+    if (err)
+      goto err_undo;
+
+    /* net_device is visible to the user! */
+
+  err_undo:
+    /* ... undo the device setup ... */
+    free_netdev(dev);
+    return err;
+  }
+
+  void remove()
+  {
+    unregister_netdev(dev);
+    free_netdev(dev);
+  }
+
+Note that after calling register_netdev() the device is visible in the system.
+Users can open it and start sending / receiving traffic immediately,
+or run any other callback, so all initialization must be done prior to
+registration.
+
+unregister_netdev() closes the device and waits for all users to be done
+with it. The memory of struct net_device itself may still be referenced
+by sysfs but all operations on that device will fail.
+
+free_netdev() can be called after unregister_netdev() returns or when
+register_netdev() failed.
+
+Device management under RTNL
+----------------------------
+
+Registering struct net_device while in context which already holds
+the ``rtnl_lock`` requires extra care. In those scenarios most drivers
+will want to make use of struct net_device's ``needs_free_netdev``
+and ``priv_destructor`` members for freeing of state.
+
+Example flow of netdev handling under ``rtnl_lock``:
+
+.. code-block:: c
+
+  static void my_setup(struct net_device *dev)
+  {
+    dev->needs_free_netdev = true;
+  }
+
+  static void my_destructor(struct net_device *dev)
+  {
+    some_obj_destroy(priv->obj);
+    some_uninit(priv);
+  }
+
+  int create_link()
+  {
+    struct my_device_priv *priv;
+    int err;
+
+    ASSERT_RTNL();
+
+    dev = alloc_netdev(sizeof(*priv), "net%d", NET_NAME_UNKNOWN, my_setup);
+    if (!dev)
+      return -ENOMEM;
+    priv = netdev_priv(dev);
+
+    /* Implicit constructor */
+    err = some_init(priv);
+    if (err)
+      goto err_free_dev;
+
+    priv->obj = some_obj_create();
+    if (!priv->obj) {
+      err = -ENOMEM;
+      goto err_some_uninit;
+    }
+    /* End of constructor, set the destructor: */
+    dev->priv_destructor = my_destructor;
+
+    err = register_netdevice(dev);
+    if (err)
+      /* register_netdevice() calls destructor on failure */
+      goto err_free_dev;
+
+    /* If anything fails now unregister_netdevice() (or unregister_netdev())
+     * will take care of calling my_destructor and free_netdev().
+     */
+
+    return 0;
+
+  err_some_uninit:
+    some_uninit(priv);
+  err_free_dev:
+    free_netdev(dev);
+    return err;
+  }
+
+If struct net_device.priv_destructor is set, it will be called by the core
+some time after unregister_netdevice(); it will also be called if
+register_netdevice() fails. The callback may be invoked with or without
+``rtnl_lock`` held.
+
+There is no explicit constructor callback; the driver "constructs" the private
+netdev state after allocating it and before registration.
+
+Setting struct net_device.needs_free_netdev makes the core call free_netdev()
+automatically after unregister_netdevice() when all references to the device
+are gone. It only takes effect after a successful call to register_netdevice(),
+so if register_netdevice() fails the driver is responsible for calling
+free_netdev().
+
+free_netdev() is safe to call on error paths right after unregister_netdevice()
+or when register_netdevice() fails. Parts of the netdev (de)registration
+process happen after ``rtnl_lock`` is released, so in those cases free_netdev()
+will defer some of the processing until ``rtnl_lock`` is released.
+
+Devices spawned from struct rtnl_link_ops should never free the
+struct net_device directly.
+
+.ndo_init and .ndo_uninit
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``.ndo_init`` and ``.ndo_uninit`` callbacks are called during net_device
+registration and de-registration, under ``rtnl_lock``. Drivers can use
+those e.g. when parts of their init process need to run under ``rtnl_lock``.
+
+``.ndo_init`` runs before the device is visible in the system; ``.ndo_uninit``
+runs during de-registration, after the device is closed but while other
+subsystems may still hold outstanding references to the netdevice.
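For illustration, a hedged sketch of wiring these callbacks up (the ``my_*`` names are hypothetical; the ``net_device_ops`` fields are real):

.. code-block:: c

  #include <linux/netdevice.h>

  static int my_ndo_init(struct net_device *dev)
  {
    /* Runs under rtnl_lock, before the device is visible. */
    return 0;
  }

  static void my_ndo_uninit(struct net_device *dev)
  {
    /* Runs under rtnl_lock during de-registration; other
     * subsystems may still hold references to dev. */
  }

  static const struct net_device_ops my_netdev_ops = {
    .ndo_init   = my_ndo_init,
    .ndo_uninit = my_ndo_uninit,
  };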
 
 MTU
 ===
@@ -64,8 +223,8 @@ ndo_do_ioctl:
        Context: process
 
 ndo_get_stats:
-       Synchronization: dev_base_lock rwlock.
-       Context: nominally process, but don't sleep inside an rwlock
+       Synchronization: rtnl_lock() semaphore, dev_base_lock rwlock, or RCU.
+       Context: atomic (can't sleep under rwlock or RCU)
 
 ndo_start_xmit:
        Synchronization: __netif_tx_lock spinlock.
index 6c009ceb118323233fb38223d7e5d3a61a51a706..500ef60b1b821422741b7105d947c3dec640ab8b 100644 (file)
@@ -8,7 +8,7 @@ Abstract
 ========
 
 This file documents the mmap() facility available with the PACKET
-socket interface on 2.4/2.6/3.x kernels. This type of sockets is used for
+socket interface. This type of socket is used for
 
 i) capture network traffic with utilities like tcpdump,
 ii) transmit network traffic, or any other that needs raw
@@ -25,12 +25,12 @@ Please send your comments to
 Why use PACKET_MMAP
 ===================
 
-In Linux 2.4/2.6/3.x if PACKET_MMAP is not enabled, the capture process is very
+A non-PACKET_MMAP capture process (plain AF_PACKET) is very
 inefficient. It uses very limited buffers and requires one system call to
 capture each packet, it requires two if you want to get packet's timestamp
 (like libpcap always does).
 
-In the other hand PACKET_MMAP is very efficient. PACKET_MMAP provides a size
+On the other hand PACKET_MMAP is very efficient. PACKET_MMAP provides a size
 configurable circular buffer mapped in user space that can be used to either
 send or receive packets. This way reading packets just needs to wait for them,
 most of the time there is no need to issue a single system call. Concerning
@@ -252,8 +252,7 @@ PACKET_MMAP setting constraints
 
 In kernel versions prior to 2.4.26 (for the 2.4 branch) and 2.6.5 (2.6 branch),
 the PACKET_MMAP buffer could hold only 32768 frames in a 32 bit architecture or
-16384 in a 64 bit architecture. For information on these kernel versions
-see http://pusa.uv.es/~ulisses/packet_mmap/packet_mmap.pre-2.4.26_2.6.5.txt
+16384 in a 64 bit architecture.
 
 Block size limit
 ----------------
@@ -437,7 +436,7 @@ and the following flags apply:
 Capture process
 ^^^^^^^^^^^^^^^
 
-     from include/linux/if_packet.h
+From include/linux/if_packet.h::
 
      #define TP_STATUS_COPY          (1 << 1)
      #define TP_STATUS_LOSING        (1 << 2)
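For context, a hedged user-space sketch of the ring setup this document describes (sizes are arbitrary examples; error handling is elided)::

     #include <sys/socket.h>
     #include <sys/mman.h>
     #include <arpa/inet.h>
     #include <linux/if_packet.h>
     #include <linux/if_ether.h>

     /* Create an AF_PACKET socket (requires CAP_NET_RAW), attach an
      * RX ring to it, and map the ring into user space. */
     static void *setup_rx_ring(int *fdp)
     {
             struct tpacket_req req = {
                     .tp_block_size = 4096,  /* multiple of PAGE_SIZE */
                     .tp_block_nr   = 64,
                     .tp_frame_size = 2048,  /* multiple of TPACKET_ALIGNMENT */
                     .tp_frame_nr   = (4096 / 2048) * 64,
             };
             int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

             setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
             *fdp = fd;
             return mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
                         PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
     }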
index 0f55c6d540f9794097d96e65c8d319210c02fbd8..5f0dea3d571e359a2655defdcf9b7383b9801ba8 100644 (file)
@@ -530,7 +530,10 @@ TLS device feature flags only control adding of new TLS connection
 offloads, old connections will remain active after flags are cleared.
 
 TLS encryption cannot be offloaded to devices without checksum calculation
-offload. Hence, TLS TX device feature flag requires NETIF_F_HW_CSUM being set.
+offload. Hence, the TLS TX device feature flag requires TX checksum offload to be set.
 Disabling the latter implies clearing the former. Disabling TX checksum offload
 should not affect old connections, and drivers should make sure checksum
 calculation does not break for them.
+Similarly, device-offloaded TLS decryption implies doing RXCSUM. If the user
+does not want to enable RX checksum offload, the TLS RX device feature is disabled
+as well.
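A hedged driver-side sketch of the dependency described above, expressed in the standard ``ndo_fix_features`` hook (illustrative only; the stack itself enforces the behaviour this document specifies)::

     #include <linux/netdevice.h>

     static netdev_features_t my_fix_features(struct net_device *dev,
                                              netdev_features_t features)
     {
             /* TLS TX offload requires TX checksum offload. */
             if (!(features & NETIF_F_HW_CSUM))
                     features &= ~NETIF_F_HW_TLS_TX;

             /* TLS RX offload requires RX checksum offload. */
             if (!(features & NETIF_F_RXCSUM))
                     features &= ~NETIF_F_HW_TLS_RX;

             return features;
     }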
index c27e59d2f702943735a5db936b6ca7fb42f60eca..0825dc496f22d7e75de88ec700704fa81f302972 100644 (file)
@@ -249,10 +249,8 @@ features; most of these are found in the "kernel hacking" submenu.  Several
 of these options should be turned on for any kernel used for development or
 testing purposes.  In particular, you should turn on:
 
- - ENABLE_MUST_CHECK and FRAME_WARN to get an
-   extra set of warnings for problems like the use of deprecated interfaces
-   or ignoring an important return value from a function.  The output
-   generated by these warnings can be verbose, but one need not worry about
+ - FRAME_WARN to get warnings for stack frames larger than a given amount.
+   The output generated can be verbose, but one need not worry about
    warnings from other parts of the kernel.
 
  - DEBUG_OBJECTS will add code to track the lifetime of various objects
index fe52c314b76391a4f5fc071c65ff98e92f0432cd..b36af65a08edffd6331c0330c780f57f5919798d 100644 (file)
@@ -1501,7 +1501,7 @@ Module for Digigram miXart8 sound cards.
 
 This module supports multiple cards.
 Note: One miXart8 board will be represented as 4 alsa cards.
-See MIXART.txt for details.
+See Documentation/sound/cards/mixart.rst for details.
 
 When the driver is compiled as a module and the hotplug firmware
 is supported, the firmware data is loaded via hotplug automatically.
index 73bbd59afc33aa9d42a6380d479322dcfecb1100..e6365836fa8bdeafc639502bb8a5c4eb5f70be26 100644 (file)
@@ -71,7 +71,7 @@ core/oss
 The codes for PCM and mixer OSS emulation modules are stored in this
 directory. The rawmidi OSS emulation is included in the ALSA rawmidi
 code since it's quite small. The sequencer code is stored in
-``core/seq/oss`` directory (see `below <#core-seq-oss>`__).
+``core/seq/oss`` directory (see `below <core/seq/oss_>`__).
 
 core/seq
 ~~~~~~~~
@@ -382,7 +382,7 @@ where ``enable[dev]`` is the module option.
 Each time the ``probe`` callback is called, check the availability of
 the device. If not available, simply increment the device index and
 returns. dev will be incremented also later (`step 7
-<#set-the-pci-driver-data-and-return-zero>`__).
+<7) Set the PCI driver data and return zero._>`__).
 
 2) Create a card instance
 ~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -450,10 +450,10 @@ field contains the information shown in ``/proc/asound/cards``.
 5) Create other components, such as mixer, MIDI, etc.
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Here you define the basic components such as `PCM <#PCM-Interface>`__,
-mixer (e.g. `AC97 <#API-for-AC97-Codec>`__), MIDI (e.g.
-`MPU-401 <#MIDI-MPU401-UART-Interface>`__), and other interfaces.
-Also, if you want a `proc file <#Proc-Interface>`__, define it here,
+Here you define the basic components such as `PCM <PCM Interface_>`__,
+mixer (e.g. `AC97 <API for AC97 Codec_>`__), MIDI (e.g.
+`MPU-401 <MIDI (MPU401-UART) Interface_>`__), and other interfaces.
+Also, if you want a `proc file <Proc Interface_>`__, define it here,
 too.
 
 6) Register the card instance.
@@ -941,7 +941,7 @@ The allocation of an interrupt source is done like this:
   chip->irq = pci->irq;
 
 where :c:func:`snd_mychip_interrupt()` is the interrupt handler
-defined `later <#pcm-interface-interrupt-handler>`__. Note that
+defined `later <PCM Interrupt Handler_>`__. Note that
 ``chip->irq`` should be defined only when :c:func:`request_irq()`
 succeeded.
 
@@ -3104,7 +3104,7 @@ processing the output stream in the irq handler.
 
 If the MPU-401 interface shares its interrupt with the other logical
 devices on the card, set ``MPU401_INFO_IRQ_HOOK`` (see
-`below <#MIDI-Interrupt-Handler>`__).
+`below <MIDI Interrupt Handler_>`__).
 
 Usually, the port address corresponds to the command port and port + 1
 corresponds to the data port. If not, you may change the ``cport``
index 70254eaa5229ff93d94453d27f3e439f25a3ee05..c136e254b4960270863e86ecc01325771ab5ab6a 100644 (file)
@@ -392,9 +392,14 @@ This ioctl is obsolete and has been removed.
 
 Errors:
 
-  =====      =============================
+  =======    ==============================================================
   EINTR      an unmasked signal is pending
-  =====      =============================
+  ENOEXEC    the vcpu hasn't been initialized or the guest tried to execute
+             instructions from device memory (arm64)
+  ENOSYS     data abort outside memslots with no syndrome info and
+             KVM_CAP_ARM_NISV_TO_USER not enabled (arm64)
+  EPERM      SVE feature set but not finalized (arm64)
+  =======    ==============================================================
 
 This ioctl is used to run a guest virtual cpu.  While there are no
 explicit parameters, there is an implicit parameter block that can be
index 546aa66428c9f872b59186f491df397be7353769..705776b31c8de0f0124fe6515ba846710ec24287 100644 (file)
@@ -203,8 +203,8 @@ F:  include/uapi/linux/nl80211.h
 F:     net/wireless/
 
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
-M:     Realtek linux nic maintainers <nic_swsd@realtek.com>
 M:     Heiner Kallweit <hkallweit1@gmail.com>
+M:     nic_swsd@realtek.com
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/realtek/r8169*
@@ -820,7 +820,6 @@ M:  Netanel Belgazal <netanel@amazon.com>
 M:     Arthur Kiyanovski <akiyano@amazon.com>
 R:     Guy Tzalik <gtzalik@amazon.com>
 R:     Saeed Bishara <saeedb@amazon.com>
-R:     Zorik Machulsky <zorik@amazon.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     Documentation/networking/device_drivers/ethernet/amazon/ena.rst
@@ -907,7 +906,7 @@ AMD KFD
 M:     Felix Kuehling <Felix.Kuehling@amd.com>
 L:     amd-gfx@lists.freedesktop.org
 S:     Supported
-T:     git git://people.freedesktop.org/~agd5f/linux
+T:     git https://gitlab.freedesktop.org/agd5f/linux.git
 F:     drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd*.[ch]
 F:     drivers/gpu/drm/amd/amdkfd/
 F:     drivers/gpu/drm/amd/include/cik_structs.h
@@ -2119,7 +2118,7 @@ N:        atmel
 ARM/Microchip Sparx5 SoC support
 M:     Lars Povlsen <lars.povlsen@microchip.com>
 M:     Steen Hegelund <Steen.Hegelund@microchip.com>
-M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
+M:     UNGLinuxDriver@microchip.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 T:     git git://github.com/microchip-ung/linux-upstream.git
@@ -2942,7 +2941,6 @@ S:        Maintained
 F:     drivers/hwmon/asus_atk0110.c
 
 ATLX ETHERNET DRIVERS
-M:     Jay Cliburn <jcliburn@gmail.com>
 M:     Chris Snook <chris.snook@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -3336,7 +3334,7 @@ F:        arch/riscv/net/
 X:     arch/riscv/net/bpf_jit_comp64.c
 
 BPF JIT for RISC-V (64-bit)
-M:     Björn Töpel <bjorn.topel@gmail.com>
+M:     Björn Töpel <bjorn@kernel.org>
 L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
 S:     Maintained
@@ -3556,7 +3554,7 @@ S:        Supported
 F:     drivers/net/ethernet/broadcom/bnxt/
 
 BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
-M:     Arend van Spriel <arend.vanspriel@broadcom.com>
+M:     Arend van Spriel <aspriel@gmail.com>
 M:     Franky Lin <franky.lin@broadcom.com>
 M:     Hante Meuleman <hante.meuleman@broadcom.com>
 M:     Chi-hsien Lin <chi-hsien.lin@infineon.com>
@@ -3883,7 +3881,7 @@ F:        drivers/mtd/nand/raw/cadence-nand-controller.c
 CADENCE USB3 DRD IP DRIVER
 M:     Peter Chen <peter.chen@nxp.com>
 M:     Pawel Laszczak <pawell@cadence.com>
-M:     Roger Quadros <rogerq@ti.com>
+R:     Roger Quadros <rogerq@kernel.org>
 R:     Aswath Govindraju <a-govindraju@ti.com>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
@@ -3961,7 +3959,7 @@ F:        net/can/
 CAN-J1939 NETWORK LAYER
 M:     Robin van der Gracht <robin@protonic.nl>
 M:     Oleksij Rempel <o.rempel@pengutronix.de>
-R:     Pengutronix Kernel Team <kernel@pengutronix.de>
+R:     kernel@pengutronix.de
 L:     linux-can@vger.kernel.org
 S:     Maintained
 F:     Documentation/networking/j1939.rst
@@ -4588,7 +4586,7 @@ B:        https://bugzilla.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 F:     Documentation/admin-guide/pm/cpuidle.rst
 F:     Documentation/driver-api/pm/cpuidle.rst
-F:     drivers/cpuidle/*
+F:     drivers/cpuidle/
 F:     include/linux/cpuidle.h
 
 CPU POWER MONITORING SUBSYSTEM
@@ -4922,9 +4920,8 @@ F:        Documentation/scsi/dc395x.rst
 F:     drivers/scsi/dc395x.*
 
 DCCP PROTOCOL
-M:     Gerrit Renker <gerrit@erg.abdn.ac.uk>
 L:     dccp@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
 F:     include/linux/dccp.h
 F:     include/linux/tfrc.h
@@ -7363,7 +7360,6 @@ L:        linux-hardening@vger.kernel.org
 S:     Maintained
 F:     Documentation/kbuild/gcc-plugins.rst
 F:     scripts/Makefile.gcc-plugins
-F:     scripts/gcc-plugin.sh
 F:     scripts/gcc-plugins/
 
 GCOV BASED KERNEL PROFILING
@@ -9240,7 +9236,7 @@ F:        tools/testing/selftests/sgx/*
 K:     \bSGX_
 
 INTERCONNECT API
-M:     Georgi Djakov <georgi.djakov@linaro.org>
+M:     Georgi Djakov <djakov@kernel.org>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/interconnect/
@@ -9273,7 +9269,7 @@ F:        drivers/net/ethernet/sgi/ioc3-eth.c
 
 IOMAP FILESYSTEM LIBRARY
 M:     Christoph Hellwig <hch@infradead.org>
-M:     Darrick J. Wong <darrick.wong@oracle.com>
+M:     Darrick J. Wong <djwong@kernel.org>
 M:     linux-xfs@vger.kernel.org
 M:     linux-fsdevel@vger.kernel.org
 L:     linux-xfs@vger.kernel.org
@@ -9327,7 +9323,6 @@ W:        http://www.adaptec.com/
 F:     drivers/scsi/ips*
 
 IPVS
-M:     Wensong Zhang <wensong@linux-vs.org>
 M:     Simon Horman <horms@verge.net.au>
 M:     Julian Anastasov <ja@ssi.bg>
 L:     netdev@vger.kernel.org
@@ -9776,7 +9771,7 @@ F:        tools/testing/selftests/kvm/s390x/
 
 KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
 M:     Paolo Bonzini <pbonzini@redhat.com>
-R:     Sean Christopherson <sean.j.christopherson@intel.com>
+R:     Sean Christopherson <seanjc@google.com>
 R:     Vitaly Kuznetsov <vkuznets@redhat.com>
 R:     Wanpeng Li <wanpengli@tencent.com>
 R:     Jim Mattson <jmattson@google.com>
@@ -10260,7 +10255,6 @@ S:      Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
 F:     Documentation/atomic_bitops.txt
 F:     Documentation/atomic_t.txt
-F:     Documentation/core-api/atomic_ops.rst
 F:     Documentation/core-api/refcount-vs-atomic.rst
 F:     Documentation/litmus-tests/
 F:     Documentation/memory-barriers.txt
@@ -10847,7 +10841,7 @@ F:      drivers/media/radio/radio-maxiradio*
 
 MCAN MMIO DEVICE DRIVER
 M:     Dan Murphy <dmurphy@ti.com>
-M:     Sriram Dash <sriram.dash@samsung.com>
+M:     Pankaj Sharma <pankj.sharma@samsung.com>
 L:     linux-can@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
@@ -11667,7 +11661,7 @@ F:      drivers/media/platform/atmel/atmel-isi.h
 
 MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
 M:     Woojung Huh <woojung.huh@microchip.com>
-M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
+M:     UNGLinuxDriver@microchip.com
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
@@ -11677,7 +11671,7 @@ F:      net/dsa/tag_ksz.c
 
 MICROCHIP LAN743X ETHERNET DRIVER
 M:     Bryan Whitehead <bryan.whitehead@microchip.com>
-M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
+M:     UNGLinuxDriver@microchip.com
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/microchip/lan743x_*
@@ -11771,7 +11765,7 @@ F:      drivers/net/wireless/microchip/wilc1000/
 
 MICROSEMI MIPS SOCS
 M:     Alexandre Belloni <alexandre.belloni@bootlin.com>
-M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
+M:     UNGLinuxDriver@microchip.com
 L:     linux-mips@vger.kernel.org
 S:     Supported
 F:     Documentation/devicetree/bindings/mips/mscc.txt
@@ -12418,7 +12412,6 @@ F:      tools/testing/selftests/net/ipsec.c
 
 NETWORKING [IPv4/IPv6]
 M:     "David S. Miller" <davem@davemloft.net>
-M:     Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 M:     Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -12475,7 +12468,6 @@ F:      net/ipv6/tcp*.c
 
 NETWORKING [TLS]
 M:     Boris Pismenny <borisp@nvidia.com>
-M:     Aviad Yehezkel <aviadye@nvidia.com>
 M:     John Fastabend <john.fastabend@gmail.com>
 M:     Daniel Borkmann <daniel@iogearbox.net>
 M:     Jakub Kicinski <kuba@kernel.org>
@@ -12825,10 +12817,10 @@ F:    tools/objtool/
 F:     include/linux/objtool.h
 
 OCELOT ETHERNET SWITCH DRIVER
-M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
 M:     Vladimir Oltean <vladimir.oltean@nxp.com>
 M:     Claudiu Manoil <claudiu.manoil@nxp.com>
 M:     Alexandre Belloni <alexandre.belloni@bootlin.com>
+M:     UNGLinuxDriver@microchip.com
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/dsa/ocelot/*
@@ -12850,7 +12842,7 @@ F:      include/misc/ocxl*
 F:     include/uapi/misc/ocxl.h
 
 OMAP AUDIO SUPPORT
-M:     Peter Ujfalusi <peter.ujfalusi@ti.com>
+M:     Peter Ujfalusi <peter.ujfalusi@gmail.com>
 M:     Jarkko Nikula <jarkko.nikula@bitmer.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 L:     linux-omap@vger.kernel.org
@@ -13890,7 +13882,7 @@ F:      drivers/platform/x86/peaq-wmi.c
 
 PENSANDO ETHERNET DRIVERS
 M:     Shannon Nelson <snelson@pensando.io>
-M:     Pensando Drivers <drivers@pensando.io>
+M:     drivers@pensando.io
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     Documentation/networking/device_drivers/ethernet/pensando/ionic.rst
@@ -14669,7 +14661,7 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
 F:     drivers/net/wireless/ath/ath11k/
 
 QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
-M:     QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
+M:     ath9k-devel@qca.qualcomm.com
 L:     linux-wireless@vger.kernel.org
 S:     Supported
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/ath9k
@@ -14820,7 +14812,7 @@ M:      Alex Deucher <alexander.deucher@amd.com>
 M:     Christian König <christian.koenig@amd.com>
 L:     amd-gfx@lists.freedesktop.org
 S:     Supported
-T:     git git://people.freedesktop.org/~agd5f/linux
+T:     git https://gitlab.freedesktop.org/agd5f/linux.git
 F:     drivers/gpu/drm/amd/
 F:     drivers/gpu/drm/radeon/
 F:     include/uapi/drm/amdgpu_drm.h
@@ -16321,6 +16313,7 @@ M:      Pekka Enberg <penberg@kernel.org>
 M:     David Rientjes <rientjes@google.com>
 M:     Joonsoo Kim <iamjoonsoo.kim@lge.com>
 M:     Andrew Morton <akpm@linux-foundation.org>
+M:     Vlastimil Babka <vbabka@suse.cz>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     include/linux/sl?b*.h
@@ -16710,6 +16703,8 @@ M:      Samuel Thibault <samuel.thibault@ens-lyon.org>
 L:     speakup@linux-speakup.org
 S:     Odd Fixes
 W:     http://www.linux-speakup.org/
+W:     https://github.com/linux-speakup/speakup
+B:     https://github.com/linux-speakup/speakup/issues
 F:     drivers/accessibility/speakup/
 
 SPEAR CLOCK FRAMEWORK SUPPORT
@@ -17541,7 +17536,7 @@ F:      arch/xtensa/
 F:     drivers/irqchip/irq-xtensa-*
 
 TEXAS INSTRUMENTS ASoC DRIVERS
-M:     Peter Ujfalusi <peter.ujfalusi@ti.com>
+M:     Peter Ujfalusi <peter.ujfalusi@gmail.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
 F:     sound/soc/ti/
@@ -17553,6 +17548,19 @@ S:     Supported
 F:     Documentation/devicetree/bindings/iio/dac/ti,dac7612.txt
 F:     drivers/iio/dac/ti-dac7612.c
 
+TEXAS INSTRUMENTS DMA DRIVERS
+M:     Peter Ujfalusi <peter.ujfalusi@gmail.com>
+L:     dmaengine@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt
+F:     Documentation/devicetree/bindings/dma/ti-edma.txt
+F:     Documentation/devicetree/bindings/dma/ti/
+F:     drivers/dma/ti/
+X:     drivers/dma/ti/cppi41.c
+F:     include/linux/dma/k3-udma-glue.h
+F:     include/linux/dma/ti-cppi5.h
+F:     include/linux/dma/k3-psil.h
+
 TEXAS INSTRUMENTS' SYSTEM CONTROL INTERFACE (TISCI) PROTOCOL DRIVER
 M:     Nishanth Menon <nm@ti.com>
 M:     Tero Kristo <t-kristo@ti.com>
@@ -17838,7 +17846,7 @@ F:      Documentation/devicetree/bindings/net/nfc/trf7970a.txt
 F:     drivers/nfc/trf7970a.c
 
 TI TWL4030 SERIES SOC CODEC DRIVER
-M:     Peter Ujfalusi <peter.ujfalusi@ti.com>
+M:     Peter Ujfalusi <peter.ujfalusi@gmail.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
 F:     sound/soc/codecs/twl4030*
@@ -18370,7 +18378,7 @@ F:      include/linux/usb/isp116x.h
 
 USB LAN78XX ETHERNET DRIVER
 M:     Woojung Huh <woojung.huh@microchip.com>
-M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
+M:     UNGLinuxDriver@microchip.com
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/microchip,lan78xx.txt
@@ -18484,7 +18492,7 @@ F:      drivers/net/usb/smsc75xx.*
 
 USB SMSC95XX ETHERNET DRIVER
 M:     Steve Glendinning <steve.glendinning@shawell.net>
-M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
+M:     UNGLinuxDriver@microchip.com
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/usb/smsc95xx.*
@@ -19031,7 +19039,7 @@ F:      drivers/input/mouse/vmmouse.h
 
 VMWARE VMXNET3 ETHERNET DRIVER
 M:     Ronak Doshi <doshir@vmware.com>
-M:     "VMware, Inc." <pv-drivers@vmware.com>
+M:     pv-drivers@vmware.com
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/vmxnet3/
@@ -19058,7 +19066,6 @@ K:      regulator_get_optional
 
 VRF
 M:     David Ahern <dsahern@kernel.org>
-M:     Shrijeet Mukherjee <shrijeet@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/networking/vrf.rst
@@ -19409,7 +19416,7 @@ F:      drivers/net/ethernet/*/*/*xdp*
 K:     (?:\b|_)xdp(?:\b|_)
 
 XDP SOCKETS (AF_XDP)
-M:     Björn Töpel <bjorn.topel@intel.com>
+M:     Björn Töpel <bjorn@kernel.org>
 M:     Magnus Karlsson <magnus.karlsson@intel.com>
 R:     Jonathan Lemon <jonathan.lemon@gmail.com>
 L:     netdev@vger.kernel.org
@@ -19505,7 +19512,7 @@ F:      arch/x86/xen/*swiotlb*
 F:     drivers/xen/*swiotlb*
 
 XFS FILESYSTEM
-M:     Darrick J. Wong <darrick.wong@oracle.com>
+M:     Darrick J. Wong <djwong@kernel.org>
 M:     linux-xfs@vger.kernel.org
 L:     linux-xfs@vger.kernel.org
 S:     Supported
index 3d328b7ab20030c9007c017a671b74ab9b580b84..b0e4767735dca21d441c0ae373c53cf9a09ecf11 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc4
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
index 78c6f05b10f915f50a8379058d32f93480f8497c..24862d15f3a36936fae1036821cf2b948208da59 100644 (file)
@@ -1105,6 +1105,12 @@ config HAVE_ARCH_PFN_VALID
 config ARCH_SUPPORTS_DEBUG_PAGEALLOC
        bool
 
+config ARCH_SPLIT_ARG64
+       bool
+       help
+          If a 32-bit architecture requires 64-bit arguments to be split into
+          pairs of 32-bit arguments, select this option.
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/alpha/include/asm/local64.h b/arch/alpha/include/asm/local64.h
deleted file mode 100644 (file)
index 36c93b5..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
index 0c6bf0d1df7ad1ea1028f649559829cf6cca0793..578bdbbb0fa7fc40d2a14e895ac28ecf0d6b541d 100644 (file)
@@ -102,16 +102,22 @@ libs-y            += arch/arc/lib/ $(LIBGCC)
 
 boot           := arch/arc/boot
 
-#default target for make without any arguments.
-KBUILD_IMAGE   := $(boot)/bootpImage
-
-all:   bootpImage
-bootpImage: vmlinux
-
-boot_targets += uImage uImage.bin uImage.gz
+boot_targets := uImage.bin uImage.gz uImage.lzma
 
+PHONY += $(boot_targets)
 $(boot_targets): vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
+uimage-default-y                       := uImage.bin
+uimage-default-$(CONFIG_KERNEL_GZIP)   := uImage.gz
+uimage-default-$(CONFIG_KERNEL_LZMA)   := uImage.lzma
+
+PHONY += uImage
+uImage: $(uimage-default-y)
+       @ln -sf $< $(boot)/uImage
+       @$(kecho) '  Image $(boot)/uImage is ready'
+
+CLEAN_FILES += $(boot)/uImage
+
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
index 538b92f4dd2530459c190c80517412200a46bd6e..5648748c285f52c46a5fbe8df1d5c8e683cafc88 100644 (file)
@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-targets := vmlinux.bin vmlinux.bin.gz uImage
 
 # uImage build relies on mkimage being available on your host for ARC target
 # You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
@@ -7,23 +6,18 @@ targets := vmlinux.bin vmlinux.bin.gz uImage
 
 OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S
 
-LINUX_START_TEXT = $$(readelf -h vmlinux | \
+LINUX_START_TEXT = $$($(READELF) -h vmlinux | \
                        grep "Entry point address" | grep -o 0x.*)
 
 UIMAGE_LOADADDR    = $(CONFIG_LINUX_LINK_BASE)
 UIMAGE_ENTRYADDR   = $(LINUX_START_TEXT)
 
-suffix-y := bin
-suffix-$(CONFIG_KERNEL_GZIP)   := gz
-suffix-$(CONFIG_KERNEL_LZMA)   := lzma
-
-targets += uImage
+targets += vmlinux.bin
+targets += vmlinux.bin.gz
+targets += vmlinux.bin.lzma
 targets += uImage.bin
 targets += uImage.gz
 targets += uImage.lzma
-extra-y += vmlinux.bin
-extra-y += vmlinux.bin.gz
-extra-y += vmlinux.bin.lzma
 
 $(obj)/vmlinux.bin: vmlinux FORCE
        $(call if_changed,objcopy)
@@ -42,7 +36,3 @@ $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
 
 $(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
        $(call if_changed,uimage,lzma)
-
-$(obj)/uImage: $(obj)/uImage.$(suffix-y)
-       @ln -sf $(notdir $<) $@
-       @echo '  Image $@ is ready'
index 81f4edec0c2a93ab39b494522dda384740464663..3c1afa524b9c26881f13d77ad476bc7ae5e02253 100644 (file)
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 generic-y += extable.h
 generic-y += kvm_para.h
-generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += parport.h
 generic-y += user.h
index 23e41e890eda7069910742f4f679f9fb47eeaf68..ad9b7fe4dba363a4713a16275a99f5968802eca9 100644 (file)
@@ -10,6 +10,7 @@
 #ifndef __ASSEMBLY__
 
 #define clear_page(paddr)              memset((paddr), 0, PAGE_SIZE)
+#define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
 #define copy_page(to, from)            memcpy((to), (from), PAGE_SIZE)
 
 struct vm_area_struct;
index 1f5308abf36d650bc8c8a193d96f19f32aeecfc8..1743506081da6230646640a13e11004b3b8892e9 100644 (file)
@@ -307,7 +307,7 @@ resume_user_mode_begin:
        mov r0, sp      ; pt_regs for arg to do_signal()/do_notify_resume()
 
        GET_CURR_THR_INFO_FLAGS   r9
-       and.f  0,  r9, TIF_SIGPENDING|TIF_NOTIFY_SIGNAL
+       and.f  0,  r9, _TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL
        bz .Lchk_notify_resume
 
        ; Normal Trap/IRQ entry only saves Scratch (caller-saved) regs
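The fix above matters because, by kernel convention, TIF_* constants are bit numbers while _TIF_* constants are the corresponding masks, and ``and.f`` needs masks. In C terms, the pattern used by the thread_info headers is::

     /* Bit number vs. mask, as defined in asm/thread_info.h: */
     #define _TIF_SIGPENDING       (1 << TIF_SIGPENDING)
     #define _TIF_NOTIFY_SIGNAL    (1 << TIF_NOTIFY_SIGNAL)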
index 6b5c54576f54d03a48a7472eed7aa2184b4ed649..a2d10c29fbcc0e315ee8c3fcfb4a5ef8ebebf3a0 100644 (file)
@@ -7,6 +7,7 @@ menuconfig ARC_SOC_HSDK
        depends on ISA_ARCV2
        select ARC_HAS_ACCL_REGS
        select ARC_IRQ_NO_AUTOSAVE
+       select ARC_FPU_SAVE_RESTORE
        select CLK_HSDK
        select RESET_CONTROLLER
        select RESET_HSDK
index 11d41e86f814d26e42249d9ff96ea4c0745d3955..7dde9fbb06d33c83a0e4e5f36e0968ac94e45bfd 100644 (file)
                clock-names = "sysclk";
        };
 };
+
+&aes1_target {
+       status = "disabled";
+};
+
+&aes2_target {
+       status = "disabled";
+};
index c4c6c7e9e37b677b786537744db00202d01dd969..5898879a3038e8fa96ca991c8972689d06da0cde 100644 (file)
                emac: gem@30000 {
                        compatible = "cadence,gem";
                        reg = <0x30000 0x10000>;
+                       interrupt-parent = <&vic0>;
                        interrupts = <31>;
                };
 
                dmac1: dmac@40000 {
                        compatible = "snps,dw-dmac";
                        reg = <0x40000 0x10000>;
+                       interrupt-parent = <&vic0>;
                        interrupts = <25>;
                };
 
                dmac2: dmac@50000 {
                        compatible = "snps,dw-dmac";
                        reg = <0x50000 0x10000>;
+                       interrupt-parent = <&vic0>;
                        interrupts = <26>;
                };
 
                axi2pico@c0000000 {
                        compatible = "picochip,axi2pico-pc3x2";
                        reg = <0xc0000000 0x10000>;
+                       interrupt-parent = <&vic0>;
                        interrupts = <13 14 15 16 17 18 19 20 21>;
                };
        };
index 496f9d3ba7b7ea4bff154769bc54ab1eb98b0b70..60fe6189e728c9ca989d34e73ae1fe1084fd8ecd 100644 (file)
                                panel@0 {
                                        compatible = "samsung,s6e63m0";
                                        reg = <0>;
+                                       max-brightness = <15>;
                                        vdd3-supply = <&panel_reg_3v0>;
                                        vci-supply = <&panel_reg_1v8>;
                                        reset-gpios = <&gpio4 11 GPIO_ACTIVE_LOW>;
index 1c11d1557779aedde6500e11a0ac54c6c6613542..b515c31f0ab75e9ff718da641d8a6b6ee00d72f6 100644 (file)
@@ -279,6 +279,7 @@ CONFIG_SERIAL_OMAP_CONSOLE=y
 CONFIG_SERIAL_DEV_BUS=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_SPI=y
+CONFIG_SPI_GPIO=m
 CONFIG_SPI_OMAP24XX=y
 CONFIG_SPI_TI_QSPI=m
 CONFIG_HSI=m
@@ -296,7 +297,6 @@ CONFIG_GPIO_TWL4030=y
 CONFIG_W1=m
 CONFIG_HDQ_MASTER_OMAP=m
 CONFIG_W1_SLAVE_DS250X=m
-CONFIG_POWER_AVS=y
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_GPIO=y
 CONFIG_BATTERY_BQ27XXX=m
index 7b5cf8430c6dcbad7f6653992ee884552828244d..cdde8fd01f8f9b167e9ab7a7da1d8fff595197bc 100644 (file)
@@ -60,6 +60,7 @@ static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
                chacha_block_xor_neon(state, d, s, nrounds);
                if (d != dst)
                        memcpy(dst, buf, bytes);
+               state[12]++;
        }
 }
 
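Note: the one-line fix above advances ChaCha's 32-bit block counter (state word 12) after every block processed in the NEON loop, so the bounced final partial block no longer reuses the previous block's counter. A rough userspace sketch of the same loop shape, with a hypothetical stand-in for the real keystream primitive:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

#define BLOCK_SIZE 64

/* Hypothetical per-block primitive standing in for
 * chacha_block_xor_neon(); only the counter handling matters here. */
static void xor_one_block(uint32_t state[16], uint8_t *dst, const uint8_t *src)
{
        for (size_t i = 0; i < BLOCK_SIZE; i++)
                dst[i] = src[i] ^ (uint8_t)(state[12] + i);
}

static void xor_stream(uint32_t state[16], uint8_t *dst, const uint8_t *src,
                       size_t bytes)
{
        uint8_t buf[BLOCK_SIZE];

        while (bytes > 0) {
                size_t n = bytes < BLOCK_SIZE ? bytes : BLOCK_SIZE;
                const uint8_t *s = src;
                uint8_t *d = dst;

                if (n < BLOCK_SIZE) {           /* bounce the partial tail */
                        memcpy(buf, src, n);
                        memset(buf + n, 0, BLOCK_SIZE - n);
                        s = buf;
                        d = buf;
                }
                xor_one_block(state, d, s);
                if (d != dst)
                        memcpy(dst, d, n);
                state[12]++;    /* the increment the patch adds */
                src += n;
                dst += n;
                bytes -= n;
        }
}

int main(void)
{
        uint32_t state[16] = { 0 };
        uint8_t in[100] = { 0 }, out[100];

        xor_stream(state, out, in, sizeof(in));
        printf("counter after 100 bytes: %u\n", state[12]);     /* 2 */
        return 0;
}
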
index 4a0848aef2075c7264e52eea1aec29c9339be99c..03657ff8fbe3d202563184b8902aa181e7474a5e 100644 (file)
@@ -2,7 +2,6 @@
 generic-y += early_ioremap.h
 generic-y += extable.h
 generic-y += flat.h
-generic-y += local64.h
 generic-y += parport.h
 
 generated-y += mach-types.h
index f3191704cab9fc2c3600c74f29d761235e8b140d..56d6814bec26a042b89afc3879b10680aa95f4d4 100644 (file)
@@ -230,10 +230,12 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
                break;
        case BUS_NOTIFY_BIND_DRIVER:
                od = to_omap_device(pdev);
-               if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
-                   pm_runtime_status_suspended(dev)) {
+               if (od) {
                        od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
-                       pm_runtime_set_active(dev);
+                       if (od->_state == OMAP_DEVICE_STATE_ENABLED &&
+                           pm_runtime_status_suspended(dev)) {
+                               pm_runtime_set_active(dev);
+                       }
                }
                break;
        case BUS_NOTIFY_ADD_DEVICE:
index eab281a5fc9f72432f053eef7a39a0b9cacdc967..09076ad0576d98d31cd4d16d97b3fe904143f95c 100644 (file)
@@ -71,7 +71,7 @@ static struct omap_voltdm_pmic omap_cpcap_iva = {
        .vp_vstepmin = OMAP4_VP_VSTEPMIN_VSTEPMIN,
        .vp_vstepmax = OMAP4_VP_VSTEPMAX_VSTEPMAX,
        .vddmin = 900000,
-       .vddmax = 1350000,
+       .vddmax = 1375000,
        .vp_timeout_us = OMAP4_VP_VLIMITTO_TIMEOUT_US,
        .i2c_slave_addr = 0x44,
        .volt_reg_addr = 0x0,
index 60e901cd0de6a31af593f4100c21ee2e3edc3def..5a957a9a09843c1031b7a9a3a46d5a454c417867 100644 (file)
@@ -371,7 +371,7 @@ static int __init xen_guest_init(void)
        }
        gnttab_init();
        if (!xen_initial_domain())
-               xenbus_probe(NULL);
+               xenbus_probe();
 
        /*
         * Making sure board specific code will not set up ops for
index 05e17351e4f334d765744af5172050c843bda79e..f39568b28ec1c47a5abbe2db43be5529111291fa 100644 (file)
@@ -174,8 +174,6 @@ config ARM64
        select HAVE_NMI
        select HAVE_PATA_PLATFORM
        select HAVE_PERF_EVENTS
-       select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI && HW_PERF_EVENTS
-       select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
        select HAVE_REGS_AND_STACK_ACCESS_API
index 6be9b37502503831a6dfa322c46b9d897e2f1458..90309208bb28dd39a7923eca0fd353ce93b14bf6 100644 (file)
@@ -10,7 +10,7 @@
 #
 # Copyright (C) 1995-2001 by Russell King
 
-LDFLAGS_vmlinux        :=--no-undefined -X -z norelro
+LDFLAGS_vmlinux        :=--no-undefined -X
 
 ifeq ($(CONFIG_RELOCATABLE), y)
 # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
@@ -115,16 +115,20 @@ KBUILD_CPPFLAGS   += -mbig-endian
 CHECKFLAGS     += -D__AARCH64EB__
 # Prefer the baremetal ELF build target, but not all toolchains include
 # it so fall back to the standard linux version if needed.
-KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb)
+KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb -z norelro)
 UTS_MACHINE    := aarch64_be
 else
 KBUILD_CPPFLAGS        += -mlittle-endian
 CHECKFLAGS     += -D__AARCH64EL__
 # Same as above, prefer ELF but fall back to linux target if needed.
-KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux)
+KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux -z norelro)
 UTS_MACHINE    := aarch64
 endif
 
+ifeq ($(CONFIG_LD_IS_LLD), y)
+KBUILD_LDFLAGS += -z norelro
+endif
+
 CHECKFLAGS     += -D__aarch64__
 
 ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_REGS),y)
index fa6e6905f58880f6f6b9447f1bd53ecaa8eb7236..53a9b76057aa1775b9a7f7e5348d2dea68fa45a8 100644 (file)
                                compatible = "snps,dw-apb-gpio-port";
                                gpio-controller;
                                #gpio-cells = <2>;
-                               snps,nr-gpios = <32>;
+                               ngpios = <32>;
                                reg = <0>;
                                interrupt-controller;
                                #interrupt-cells = <2>;
                                compatible = "snps,dw-apb-gpio-port";
                                gpio-controller;
                                #gpio-cells = <2>;
-                               snps,nr-gpios = <32>;
+                               ngpios = <32>;
                                reg = <0>;
                                interrupt-controller;
                                #interrupt-cells = <2>;
                                compatible = "snps,dw-apb-gpio-port";
                                gpio-controller;
                                #gpio-cells = <2>;
-                               snps,nr-gpios = <8>;
+                               ngpios = <8>;
                                reg = <0>;
                                interrupt-controller;
                                #interrupt-cells = <2>;
index ff9cbb6312128ada86abaa827fe07d02ed29e378..07ac208edc89441b06401c960e29a4c43a0b9cf7 100644 (file)
@@ -1,6 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 generic-y += early_ioremap.h
-generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += qrwlock.h
 generic-y += qspinlock.h
index 015ddffaf6caa3213813fcdcbb5601b1a3ec95d8..b56a4b2bc24864081198bc104ab6f26d6855201c 100644 (file)
@@ -17,7 +17,7 @@
 #include <asm/lse.h>
 
 #define ATOMIC_OP(op)                                                  \
-static inline void arch_##op(int i, atomic_t *v)                       \
+static __always_inline void arch_##op(int i, atomic_t *v)              \
 {                                                                      \
        __lse_ll_sc_body(op, i, v);                                     \
 }
@@ -32,7 +32,7 @@ ATOMIC_OP(atomic_sub)
 #undef ATOMIC_OP
 
 #define ATOMIC_FETCH_OP(name, op)                                      \
-static inline int arch_##op##name(int i, atomic_t *v)                  \
+static __always_inline int arch_##op##name(int i, atomic_t *v)         \
 {                                                                      \
        return __lse_ll_sc_body(op##name, i, v);                        \
 }
@@ -56,7 +56,7 @@ ATOMIC_FETCH_OPS(atomic_sub_return)
 #undef ATOMIC_FETCH_OPS
 
 #define ATOMIC64_OP(op)                                                        \
-static inline void arch_##op(long i, atomic64_t *v)                    \
+static __always_inline void arch_##op(long i, atomic64_t *v)           \
 {                                                                      \
        __lse_ll_sc_body(op, i, v);                                     \
 }
@@ -71,7 +71,7 @@ ATOMIC64_OP(atomic64_sub)
 #undef ATOMIC64_OP
 
 #define ATOMIC64_FETCH_OP(name, op)                                    \
-static inline long arch_##op##name(long i, atomic64_t *v)              \
+static __always_inline long arch_##op##name(long i, atomic64_t *v)     \
 {                                                                      \
        return __lse_ll_sc_body(op##name, i, v);                        \
 }
@@ -94,7 +94,7 @@ ATOMIC64_FETCH_OPS(atomic64_sub_return)
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_FETCH_OPS
 
-static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
+static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 {
        return __lse_ll_sc_body(atomic64_dec_if_positive, v);
 }
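
Note: the hunks above harden the LSE atomic wrappers from plain inline to __always_inline, so even unoptimised or instrumented builds cannot emit them out of line. A small sketch of the macro-stamping pattern itself; the operations here are ordinary integer math, not real atomics, and the names are illustrative:

#include <stdio.h>

#define my_always_inline inline __attribute__((always_inline))

/* Generate one forced-inline wrapper per operation, in the style of
 * the kernel's ATOMIC_OP() above. */
#define ATOMIC_OP(name, expr)                                   \
static my_always_inline int arch_##name(int i, int *v)         \
{                                                               \
        return (expr);                                          \
}

ATOMIC_OP(atomic_add, *v += i)
ATOMIC_OP(atomic_sub, *v -= i)
#undef ATOMIC_OP

int main(void)
{
        int v = 10;

        arch_atomic_add(5, &v);
        arch_atomic_sub(3, &v);
        printf("%d\n", v);      /* 12 */
        return 0;
}
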
index 11beda85ee7e536693fd65751aebe880421f0c20..8fcfab0c25672db32d47f7eb78fdda1671595ddc 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/jump_label.h>
 #include <linux/kvm_types.h>
 #include <linux/percpu.h>
+#include <linux/psci.h>
 #include <asm/arch_gicv3.h>
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
@@ -240,6 +241,28 @@ struct kvm_host_data {
        struct kvm_pmu_events pmu_events;
 };
 
+struct kvm_host_psci_config {
+       /* PSCI version used by host. */
+       u32 version;
+
+       /* Function IDs used by host if version is v0.1. */
+       struct psci_0_1_function_ids function_ids_0_1;
+
+       bool psci_0_1_cpu_suspend_implemented;
+       bool psci_0_1_cpu_on_implemented;
+       bool psci_0_1_cpu_off_implemented;
+       bool psci_0_1_migrate_implemented;
+};
+
+extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
+#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)
+
+extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
+#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)
+
+extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
+#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
+
 struct vcpu_reset_state {
        unsigned long   pc;
        unsigned long   r0;
index 6f986e09a7815e804cd73902e2537147ec733864..f0fe0cc6abe0b1636e1cd894097a1cf1a985b427 100644 (file)
@@ -176,10 +176,21 @@ static inline void __uaccess_enable_hw_pan(void)
  * The Tag check override (TCO) bit disables temporarily the tag checking
  * preventing the issue.
  */
-static inline void uaccess_disable_privileged(void)
+static inline void __uaccess_disable_tco(void)
 {
        asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
                                 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+}
+
+static inline void __uaccess_enable_tco(void)
+{
+       asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
+                                ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+}
+
+static inline void uaccess_disable_privileged(void)
+{
+       __uaccess_disable_tco();
 
        if (uaccess_ttbr0_disable())
                return;
@@ -189,8 +200,7 @@ static inline void uaccess_disable_privileged(void)
 
 static inline void uaccess_enable_privileged(void)
 {
-       asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
-                                ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+       __uaccess_enable_tco();
 
        if (uaccess_ttbr0_enable())
                return;
index f42fd9e339815f7b97525cd30b1ebe61692393c0..3017844635874838c968780c48a1c4f22312435b 100644 (file)
@@ -75,7 +75,7 @@ int main(void)
   DEFINE(S_SDEI_TTBR1,         offsetof(struct pt_regs, sdei_ttbr1));
   DEFINE(S_PMR_SAVE,           offsetof(struct pt_regs, pmr_save));
   DEFINE(S_STACKFRAME,         offsetof(struct pt_regs, stackframe));
-  DEFINE(S_FRAME_SIZE,         sizeof(struct pt_regs));
+  DEFINE(PT_REGS_SIZE,         sizeof(struct pt_regs));
   BLANK();
 #ifdef CONFIG_COMPAT
   DEFINE(COMPAT_SIGFRAME_REGS_OFFSET,          offsetof(struct compat_sigframe, uc.uc_mcontext.arm_r0));
index 7ffb5f1d8b68205498c3409de75d4ff54c3b97c4..e99eddec0a46925b82b4f434611e2907e6747a86 100644 (file)
@@ -2568,7 +2568,7 @@ static void verify_hyp_capabilities(void)
        int parange, ipa_max;
        unsigned int safe_vmid_bits, vmid_bits;
 
-       if (!IS_ENABLED(CONFIG_KVM) || !IS_ENABLED(CONFIG_KVM_ARM_HOST))
+       if (!IS_ENABLED(CONFIG_KVM))
                return;
 
        safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
index a338f40e64d393b2f78d5b01b4bda24930e0db87..b3e4f9a088b1a76118205f0065fda2cc6859aa2f 100644 (file)
@@ -35,7 +35,7 @@
  */
        .macro  ftrace_regs_entry, allregs=0
        /* Make room for pt_regs, plus a callee frame */
-       sub     sp, sp, #(S_FRAME_SIZE + 16)
+       sub     sp, sp, #(PT_REGS_SIZE + 16)
 
        /* Save function arguments (and x9 for simplicity) */
        stp     x0, x1, [sp, #S_X0]
        .endif
 
        /* Save the callsite's SP and LR */
-       add     x10, sp, #(S_FRAME_SIZE + 16)
+       add     x10, sp, #(PT_REGS_SIZE + 16)
        stp     x9, x10, [sp, #S_LR]
 
        /* Save the PC after the ftrace callsite */
        str     x30, [sp, #S_PC]
 
        /* Create a frame record for the callsite above pt_regs */
-       stp     x29, x9, [sp, #S_FRAME_SIZE]
-       add     x29, sp, #S_FRAME_SIZE
+       stp     x29, x9, [sp, #PT_REGS_SIZE]
+       add     x29, sp, #PT_REGS_SIZE
 
        /* Create our frame record within pt_regs. */
        stp     x29, x30, [sp, #S_STACKFRAME]
@@ -120,7 +120,7 @@ ftrace_common_return:
        ldr     x9, [sp, #S_PC]
 
        /* Restore the callsite's SP */
-       add     sp, sp, #S_FRAME_SIZE + 16
+       add     sp, sp, #PT_REGS_SIZE + 16
 
        ret     x9
 SYM_CODE_END(ftrace_common)
@@ -130,7 +130,7 @@ SYM_CODE_START(ftrace_graph_caller)
        ldr     x0, [sp, #S_PC]
        sub     x0, x0, #AARCH64_INSN_SIZE      // ip (callsite's BL insn)
        add     x1, sp, #S_LR                   // parent_ip (callsite's LR)
-       ldr     x2, [sp, #S_FRAME_SIZE]         // parent fp (callsite's FP)
+       ldr     x2, [sp, #PT_REGS_SIZE]         // parent fp (callsite's FP)
        bl      prepare_ftrace_return
        b       ftrace_common_return
 SYM_CODE_END(ftrace_graph_caller)
index 2a93fa5f4e49d8c5daf04b412b613f9bdae85adc..c9bae73f2621a345d9b590cf896a2e8ecc5d47c4 100644 (file)
@@ -75,7 +75,7 @@ alternative_else_nop_endif
        .endif
 #endif
 
-       sub     sp, sp, #S_FRAME_SIZE
+       sub     sp, sp, #PT_REGS_SIZE
 #ifdef CONFIG_VMAP_STACK
        /*
         * Test whether the SP has overflowed, without corrupting a GPR.
@@ -96,7 +96,7 @@ alternative_else_nop_endif
         * userspace, and can clobber EL0 registers to free up GPRs.
         */
 
-       /* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
+       /* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
        msr     tpidr_el0, x0
 
        /* Recover the original x0 value and stash it in tpidrro_el0 */
@@ -182,7 +182,6 @@ alternative_else_nop_endif
        mrs_s   \tmp2, SYS_GCR_EL1
        bfi     \tmp2, \tmp, #0, #16
        msr_s   SYS_GCR_EL1, \tmp2
-       isb
 #endif
        .endm
 
@@ -194,6 +193,7 @@ alternative_else_nop_endif
        ldr_l   \tmp, gcr_kernel_excl
 
        mte_set_gcr \tmp, \tmp2
+       isb
 1:
 #endif
        .endm
@@ -253,7 +253,7 @@ alternative_else_nop_endif
 
        scs_load tsk, x20
        .else
-       add     x21, sp, #S_FRAME_SIZE
+       add     x21, sp, #PT_REGS_SIZE
        get_current_task tsk
        .endif /* \el == 0 */
        mrs     x22, elr_el1
@@ -377,7 +377,7 @@ alternative_else_nop_endif
        ldp     x26, x27, [sp, #16 * 13]
        ldp     x28, x29, [sp, #16 * 14]
        ldr     lr, [sp, #S_LR]
-       add     sp, sp, #S_FRAME_SIZE           // restore sp
+       add     sp, sp, #PT_REGS_SIZE           // restore sp
 
        .if     \el == 0
 alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
@@ -580,12 +580,12 @@ __bad_stack:
 
        /*
         * Store the original GPRs to the new stack. The original SP (minus
-        * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
+        * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
         */
-       sub     sp, sp, #S_FRAME_SIZE
+       sub     sp, sp, #PT_REGS_SIZE
        kernel_entry 1
        mrs     x0, tpidr_el0
-       add     x0, x0, #S_FRAME_SIZE
+       add     x0, x0, #PT_REGS_SIZE
        str     x0, [sp, #S_SP]
 
        /* Stash the regs for handle_bad_stack */
index 38bb07eff8720d238e1d03759f59fb76e8f5ee05..3605f77ad4df1796c52b01a2bb959569d5ea1a83 100644 (file)
@@ -23,8 +23,6 @@
 #include <linux/platform_device.h>
 #include <linux/sched_clock.h>
 #include <linux/smp.h>
-#include <linux/nmi.h>
-#include <linux/cpufreq.h>
 
 /* ARMv8 Cortex-A53 specific event types. */
 #define ARMV8_A53_PERFCTR_PREF_LINEFILL                                0xC2
@@ -1250,21 +1248,10 @@ static struct platform_driver armv8_pmu_driver = {
 
 static int __init armv8_pmu_driver_init(void)
 {
-       int ret;
-
        if (acpi_disabled)
-               ret = platform_driver_register(&armv8_pmu_driver);
+               return platform_driver_register(&armv8_pmu_driver);
        else
-               ret = arm_pmu_acpi_probe(armv8_pmuv3_init);
-
-       /*
-        * Try to re-initialize lockup detector after PMU init in
-        * case PMU events are triggered via NMIs.
-        */
-       if (ret == 0 && arm_pmu_irq_is_nmi())
-               lockup_detector_init();
-
-       return ret;
+               return arm_pmu_acpi_probe(armv8_pmuv3_init);
 }
 device_initcall(armv8_pmu_driver_init)
 
@@ -1322,27 +1309,3 @@ void arch_perf_update_userpage(struct perf_event *event,
        userpg->cap_user_time_zero = 1;
        userpg->cap_user_time_short = 1;
 }
-
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
-/*
- * Safe maximum CPU frequency in case a particular platform doesn't implement
- * cpufreq driver. Although, architecture doesn't put any restrictions on
- * maximum frequency but 5 GHz seems to be safe maximum given the available
- * Arm CPUs in the market which are clocked much less than 5 GHz. On the other
- * hand, we can't make it much higher as it would lead to a large hard-lockup
- * detection timeout on parts which are running slower (eg. 1GHz on
- * Developerbox) and doesn't possess a cpufreq driver.
- */
-#define SAFE_MAX_CPU_FREQ      5000000000UL // 5 GHz
-u64 hw_nmi_get_sample_period(int watchdog_thresh)
-{
-       unsigned int cpu = smp_processor_id();
-       unsigned long max_cpu_freq;
-
-       max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL;
-       if (!max_cpu_freq)
-               max_cpu_freq = SAFE_MAX_CPU_FREQ;
-
-       return (u64)max_cpu_freq * watchdog_thresh;
-}
-#endif
index 890ca72c5a5148a662c52de78f42c81c88abf420..288a84e253ccbecc227a8356e20819a1099266bd 100644 (file)
@@ -25,7 +25,7 @@
        stp x24, x25, [sp, #S_X24]
        stp x26, x27, [sp, #S_X26]
        stp x28, x29, [sp, #S_X28]
-       add x0, sp, #S_FRAME_SIZE
+       add x0, sp, #PT_REGS_SIZE
        stp lr, x0, [sp, #S_LR]
        /*
         * Construct a useful saved PSTATE
@@ -62,7 +62,7 @@
        .endm
 
 SYM_CODE_START(kretprobe_trampoline)
-       sub sp, sp, #S_FRAME_SIZE
+       sub sp, sp, #PT_REGS_SIZE
 
        save_all_base_regs
 
@@ -76,7 +76,7 @@ SYM_CODE_START(kretprobe_trampoline)
 
        restore_all_base_regs
 
-       add sp, sp, #S_FRAME_SIZE
+       add sp, sp, #PT_REGS_SIZE
        ret
 
 SYM_CODE_END(kretprobe_trampoline)
index f71d6ce4673f53e5e935514791a52d6149b3b9e5..6237486ff6bb73db074d6e275c7d739548a1a7d0 100644 (file)
@@ -914,13 +914,6 @@ static void do_signal(struct pt_regs *regs)
 asmlinkage void do_notify_resume(struct pt_regs *regs,
                                 unsigned long thread_flags)
 {
-       /*
-        * The assembly code enters us with IRQs off, but it hasn't
-        * informed the tracing code of that for efficiency reasons.
-        * Update the trace code with the current status.
-        */
-       trace_hardirqs_off();
-
        do {
                if (thread_flags & _TIF_NEED_RESCHED) {
                        /* Unmask Debug and SError for the next task */
index 6bc3a3698c3d1ea9178a239c8ad96835b0d9d1c2..ad00f99ee9b0482c8df3e429a2d1a0c4d426af0b 100644 (file)
@@ -434,7 +434,7 @@ static void __init hyp_mode_check(void)
                           "CPU: CPUs started in inconsistent modes");
        else
                pr_info("CPU: All CPU(s) started at EL1\n");
-       if (IS_ENABLED(CONFIG_KVM))
+       if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode())
                kvm_compute_layout();
 }
 
@@ -807,7 +807,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
        unsigned int cpu, i;
 
        for (i = 0; i < NR_IPI; i++) {
-               unsigned int irq = irq_desc_get_irq(ipi_desc[i]);
                seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
                           prec >= 4 ? " " : "");
                for_each_online_cpu(cpu)
index f61e9d8cc55a1efe2cda0bb94f67177bb79acef6..c2877c332f2dc636a403193bff46e2df84001df5 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <asm/daifflags.h>
 #include <asm/debug-monitors.h>
+#include <asm/exception.h>
 #include <asm/fpsimd.h>
 #include <asm/syscall.h>
 #include <asm/thread_info.h>
@@ -165,15 +166,8 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
        if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
                local_daif_mask();
                flags = current_thread_info()->flags;
-               if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) {
-                       /*
-                        * We're off to userspace, where interrupts are
-                        * always enabled after we restore the flags from
-                        * the SPSR.
-                        */
-                       trace_hardirqs_on();
+               if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
                        return;
-               }
                local_daif_restore(DAIF_PROCCTX);
        }
 
index 08156be755691d6cb7dd239119326ef4a3904b87..6895ce777e7f286ce82da4fb8c54a3ac58cd0b37 100644 (file)
@@ -42,7 +42,6 @@
 #include <asm/smp.h>
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
-#include <asm/exception.h>
 #include <asm/system_misc.h>
 #include <asm/sysreg.h>
 
index a8f8e409e2bfbcdd2a9c0318eb41465570a83882..cd9c3fa25902fcff85958e2d3dd3b6a1a1e0756b 100644 (file)
@@ -24,8 +24,7 @@ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti
 # routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
 # preparation in build-time C")).
 ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv       \
-            -Bsymbolic $(call ld-option, --no-eh-frame-hdr) --build-id=sha1 -n \
-            $(btildflags-y) -T
+            -Bsymbolic --build-id=sha1 -n $(btildflags-y) -T
 
 ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
 ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
index d808ad31e01f766d799449b81fb4e16c3f477157..61dbb4c838ef75291aa983f75af1a0dde1eb3794 100644 (file)
@@ -40,9 +40,6 @@ SECTIONS
        PROVIDE (_etext = .);
        PROVIDE (etext = .);
 
-       .eh_frame_hdr   : { *(.eh_frame_hdr) }          :text   :eh_frame_hdr
-       .eh_frame       : { KEEP (*(.eh_frame)) }       :text
-
        .dynamic        : { *(.dynamic) }               :text   :dynamic
 
        .rodata         : { *(.rodata*) }               :text
@@ -54,6 +51,7 @@ SECTIONS
                *(.note.GNU-stack)
                *(.data .data.* .gnu.linkonce.d.* .sdata*)
                *(.bss .sbss .dynbss .dynsbss)
+               *(.eh_frame .eh_frame_hdr)
        }
 }
 
@@ -66,7 +64,6 @@ PHDRS
        text            PT_LOAD         FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
        dynamic         PT_DYNAMIC      FLAGS(4);               /* PF_R */
        note            PT_NOTE         FLAGS(4);               /* PF_R */
-       eh_frame_hdr    PT_GNU_EH_FRAME;
 }
 
 /*
index 043756db8f6ec27c72fc0fc1d4c261d82f87b445..3964acf5451eacec41369ec2688e5cf3eab3906d 100644 (file)
@@ -49,14 +49,6 @@ if KVM
 
 source "virt/kvm/Kconfig"
 
-config KVM_ARM_PMU
-       bool "Virtual Performance Monitoring Unit (PMU) support"
-       depends on HW_PERF_EVENTS
-       default y
-       help
-         Adds support for a virtual Performance Monitoring Unit (PMU) in
-         virtual machines.
-
 endif # KVM
 
 endif # VIRTUALIZATION
index 60fd181df6243f93ecc27219ec0a9fedf87ff7df..13b017284bf96c761b040e7db41631afda275daf 100644 (file)
@@ -24,4 +24,4 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
         vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
         vgic/vgic-its.o vgic/vgic-debug.o
 
-kvm-$(CONFIG_KVM_ARM_PMU)  += pmu-emul.o
+kvm-$(CONFIG_HW_PERF_EVENTS)  += pmu-emul.o
index 32ba6fbc38141a59b5d70d8d9cd2689e86498e3b..74e0699661e90cee74094136c6a052c965cd5073 100644 (file)
@@ -1129,9 +1129,10 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
        if (!irqchip_in_kernel(vcpu->kvm))
                goto no_vgic;
 
-       if (!vgic_initialized(vcpu->kvm))
-               return -ENODEV;
-
+       /*
+        * At this stage, we have the guarantee that the vgic is both
+        * available and initialized.
+        */
        if (!timer_irqs_are_valid(vcpu)) {
                kvm_debug("incorrectly configured timer irqs\n");
                return -EINVAL;
index 6e637d2b4cfb7fe0978fd491f4506da0cc7793e7..04c44853b103b1681d600d853f5d10160147552c 100644 (file)
@@ -65,10 +65,6 @@ static bool vgic_present;
 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
 DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
-extern u64 kvm_nvhe_sym(__cpu_logical_map)[NR_CPUS];
-extern u32 kvm_nvhe_sym(kvm_host_psci_version);
-extern struct psci_0_1_function_ids kvm_nvhe_sym(kvm_host_psci_0_1_function_ids);
-
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -584,11 +580,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
                 * Map the VGIC hardware resources before running a vcpu the
                 * first time on this VM.
                 */
-               if (unlikely(!vgic_ready(kvm))) {
-                       ret = kvm_vgic_map_resources(kvm);
-                       if (ret)
-                               return ret;
-               }
+               ret = kvm_vgic_map_resources(kvm);
+               if (ret)
+                       return ret;
        } else {
                /*
                 * Tell the rest of the code that there are userspace irqchip
@@ -1574,12 +1568,12 @@ static struct notifier_block hyp_init_cpu_pm_nb = {
        .notifier_call = hyp_init_cpu_pm_notifier,
 };
 
-static void __init hyp_cpu_pm_init(void)
+static void hyp_cpu_pm_init(void)
 {
        if (!is_protected_kvm_enabled())
                cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
 }
-static void __init hyp_cpu_pm_exit(void)
+static void hyp_cpu_pm_exit(void)
 {
        if (!is_protected_kvm_enabled())
                cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
@@ -1604,9 +1598,12 @@ static void init_cpu_logical_map(void)
         * allow any other CPUs from the `possible` set to boot.
         */
        for_each_online_cpu(cpu)
-               kvm_nvhe_sym(__cpu_logical_map)[cpu] = cpu_logical_map(cpu);
+               hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
 }
 
+#define init_psci_0_1_impl_state(config, what) \
+       config.psci_0_1_ ## what ## _implemented = psci_ops.what
+
 static bool init_psci_relay(void)
 {
        /*
@@ -1618,8 +1615,15 @@ static bool init_psci_relay(void)
                return false;
        }
 
-       kvm_nvhe_sym(kvm_host_psci_version) = psci_ops.get_version();
-       kvm_nvhe_sym(kvm_host_psci_0_1_function_ids) = get_psci_0_1_function_ids();
+       kvm_host_psci_config.version = psci_ops.get_version();
+
+       if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
+               kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
+               init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
+               init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
+               init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
+               init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
+       }
        return true;
 }
 
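Note: init_psci_0_1_impl_state() above relies on preprocessor token pasting: the "what" argument is spliced into both the struct field name and the psci_ops member lookup. A self-contained sketch of the same ## trick, using made-up types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ops    { int (*cpu_on)(void); int (*cpu_off)(void); };
struct config {
        bool psci_0_1_cpu_on_implemented;
        bool psci_0_1_cpu_off_implemented;
};

/* "what" becomes part of a field name on the left and a member
 * lookup on the right, exactly as in the macro above. */
#define init_impl_state(cfg, o, what) \
        ((cfg).psci_0_1_ ## what ## _implemented = ((o).what != NULL))

static int fake_cpu_on(void) { return 0; }

int main(void)
{
        struct ops o = { .cpu_on = fake_cpu_on };
        struct config cfg = { 0 };

        init_impl_state(cfg, o, cpu_on);
        init_impl_state(cfg, o, cpu_off);
        printf("cpu_on=%d cpu_off=%d\n",
               cfg.psci_0_1_cpu_on_implemented,
               cfg.psci_0_1_cpu_off_implemented);       /* cpu_on=1 cpu_off=0 */
        return 0;
}
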
index b1f60923a8feb80ff676a0ba84f3b23f3e4f398c..61716359035d6fa2a600a4def8d731681617de09 100644 (file)
@@ -59,4 +59,13 @@ static inline void __adjust_pc(struct kvm_vcpu *vcpu)
        }
 }
 
+/*
+ * Skip an instruction while host sysregs are live.
+ * Assumes host is always 64-bit.
+ */
+static inline void kvm_skip_host_instr(void)
+{
+       write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
+}
+
 #endif
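
Note: kvm_skip_host_instr() works because every AArch64 instruction is 4 bytes, so stepping past a trapped SMC is a fixed-size bump of the saved return address (ELR_EL2). A toy model of the same idea, with a struct standing in for the live system register:

#include <stdint.h>
#include <stdio.h>

#define AARCH64_INSN_SIZE 4     /* fixed-width A64 encoding */

struct trap_ctx {
        uint64_t elr;           /* stands in for ELR_EL2 */
};

/* Make the eventual exception return land on the instruction
 * after the one that trapped, as kvm_skip_host_instr() does. */
static void skip_instr(struct trap_ctx *ctx)
{
        ctx->elr += AARCH64_INSN_SIZE;
}

int main(void)
{
        struct trap_ctx ctx = { .elr = 0x80001000 };

        skip_instr(&ctx);       /* SMC at 0x80001000, resume at +4 */
        printf("resume at 0x%llx\n", (unsigned long long)ctx.elr);
        return 0;
}
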
index bde658d51404b46a64f6406e250f7713b191e622..a906f9e2ff34fba2d5588263b1f024e23b09963d 100644 (file)
@@ -157,11 +157,6 @@ static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
        __kvm_hyp_host_forward_smc(host_ctxt);
 }
 
-static void skip_host_instruction(void)
-{
-       write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
-}
-
 static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
 {
        bool handled;
@@ -170,11 +165,8 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
        if (!handled)
                default_host_smc_handler(host_ctxt);
 
-       /*
-        * Unlike HVC, the return address of an SMC is the instruction's PC.
-        * Move the return address past the instruction.
-        */
-       skip_host_instruction();
+       /* SMC was trapped, move ELR past the current PC. */
+       kvm_skip_host_instr();
 }
 
 void handle_trap(struct kvm_cpu_context *host_ctxt)
index cbab0c6246e20fdafe100846d7471ab775adda0b..2997aa156d8e5c2d17ad241e1abcc388c6116276 100644 (file)
  * Other CPUs should not be allowed to boot because their features were
  * not checked against the finalized system capabilities.
  */
-u64 __ro_after_init __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
+u64 __ro_after_init hyp_cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
 
 u64 cpu_logical_map(unsigned int cpu)
 {
-       if (cpu >= ARRAY_SIZE(__cpu_logical_map))
+       if (cpu >= ARRAY_SIZE(hyp_cpu_logical_map))
                hyp_panic();
 
-       return __cpu_logical_map[cpu];
+       return hyp_cpu_logical_map[cpu];
 }
 
 unsigned long __hyp_per_cpu_offset(unsigned int cpu)
index 08dc9de693147fbb964271110940b99bc2c4727a..e3947846ffcb9acd8d4528190741fdb48325e711 100644 (file)
@@ -7,11 +7,8 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
-#include <kvm/arm_hypercalls.h>
 #include <linux/arm-smccc.h>
 #include <linux/kvm_host.h>
-#include <linux/psci.h>
-#include <kvm/arm_psci.h>
 #include <uapi/linux/psci.h>
 
 #include <nvhe/trap_handler.h>
@@ -22,9 +19,8 @@ void kvm_hyp_cpu_resume(unsigned long r0);
 void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 
 /* Config options set by the host. */
-__ro_after_init u32 kvm_host_psci_version;
-__ro_after_init struct psci_0_1_function_ids kvm_host_psci_0_1_function_ids;
-__ro_after_init s64 hyp_physvirt_offset;
+struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;
+s64 __ro_after_init hyp_physvirt_offset;
 
 #define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)
 
@@ -47,19 +43,16 @@ struct psci_boot_args {
 static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
 static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;
 
-static u64 get_psci_func_id(struct kvm_cpu_context *host_ctxt)
-{
-       DECLARE_REG(u64, func_id, host_ctxt, 0);
-
-       return func_id;
-}
+#define        is_psci_0_1(what, func_id)                                      \
+       (kvm_host_psci_config.psci_0_1_ ## what ## _implemented &&      \
+        (func_id) == kvm_host_psci_config.function_ids_0_1.what)
 
 static bool is_psci_0_1_call(u64 func_id)
 {
-       return (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend) ||
-              (func_id == kvm_host_psci_0_1_function_ids.cpu_on) ||
-              (func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
-              (func_id == kvm_host_psci_0_1_function_ids.migrate);
+       return (is_psci_0_1(cpu_suspend, func_id) ||
+               is_psci_0_1(cpu_on, func_id) ||
+               is_psci_0_1(cpu_off, func_id) ||
+               is_psci_0_1(migrate, func_id));
 }
 
 static bool is_psci_0_2_call(u64 func_id)
@@ -69,16 +62,6 @@ static bool is_psci_0_2_call(u64 func_id)
               (PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31));
 }
 
-static bool is_psci_call(u64 func_id)
-{
-       switch (kvm_host_psci_version) {
-       case PSCI_VERSION(0, 1):
-               return is_psci_0_1_call(func_id);
-       default:
-               return is_psci_0_2_call(func_id);
-       }
-}
-
 static unsigned long psci_call(unsigned long fn, unsigned long arg0,
                               unsigned long arg1, unsigned long arg2)
 {
@@ -248,15 +231,14 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
 
 static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
 {
-       if ((func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
-           (func_id == kvm_host_psci_0_1_function_ids.migrate))
+       if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
                return psci_forward(host_ctxt);
-       else if (func_id == kvm_host_psci_0_1_function_ids.cpu_on)
+       if (is_psci_0_1(cpu_on, func_id))
                return psci_cpu_on(func_id, host_ctxt);
-       else if (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend)
+       if (is_psci_0_1(cpu_suspend, func_id))
                return psci_cpu_suspend(func_id, host_ctxt);
-       else
-               return PSCI_RET_NOT_SUPPORTED;
+
+       return PSCI_RET_NOT_SUPPORTED;
 }
 
 static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
@@ -298,20 +280,23 @@ static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_
 
 bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
 {
-       u64 func_id = get_psci_func_id(host_ctxt);
+       DECLARE_REG(u64, func_id, host_ctxt, 0);
        unsigned long ret;
 
-       if (!is_psci_call(func_id))
-               return false;
-
-       switch (kvm_host_psci_version) {
+       switch (kvm_host_psci_config.version) {
        case PSCI_VERSION(0, 1):
+               if (!is_psci_0_1_call(func_id))
+                       return false;
                ret = psci_0_1_handler(func_id, host_ctxt);
                break;
        case PSCI_VERSION(0, 2):
+               if (!is_psci_0_2_call(func_id))
+                       return false;
                ret = psci_0_2_handler(func_id, host_ctxt);
                break;
        default:
+               if (!is_psci_0_2_call(func_id))
+                       return false;
                ret = psci_1_0_handler(func_id, host_ctxt);
                break;
        }
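
Note: the handler rework above moves the "is this a valid PSCI call?" test into each version branch, so a function ID is validated against the rules of the negotiated PSCI version before being handled. A compact sketch of that dispatch shape; the 0.1 ID range is invented, and the 0.2 range mirrors only the 32-bit PSCI_0_2_FN window used above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum version { V0_1, V0_2, V1_0 };

static bool is_v0_1_call(uint64_t id) { return id >= 1 && id <= 4; }
static bool is_v0_2_call(uint64_t id)
{
        return id >= 0x84000000u && id <= 0x8400001fu;
}

static long handle_v0_1(uint64_t id) { (void)id; return 0; }
static long handle_v0_2(uint64_t id) { (void)id; return 0; }
static long handle_v1_0(uint64_t id) { (void)id; return 0; }

/* Returns false when the ID is not a PSCI call for this version,
 * letting the caller fall back to other SMC handling. */
static bool dispatch(enum version ver, uint64_t id, long *ret)
{
        switch (ver) {
        case V0_1:
                if (!is_v0_1_call(id))
                        return false;
                *ret = handle_v0_1(id);
                break;
        case V0_2:
                if (!is_v0_2_call(id))
                        return false;
                *ret = handle_v0_2(id);
                break;
        default:        /* v1.x reuses the 0.2 function ID space */
                if (!is_v0_2_call(id))
                        return false;
                *ret = handle_v1_0(id);
                break;
        }
        return true;
}

int main(void)
{
        long ret;

        printf("%d %d\n", dispatch(V0_2, 0x84000000u, &ret),
               dispatch(V0_2, 42, &ret));       /* 1 0 */
        return 0;
}
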
index 398f6df1bbe40ad298831b2a7acc109f8acad0ab..4ad66a532e38b37007fd75610e15a426b7123bd8 100644 (file)
@@ -850,8 +850,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
                   return -EINVAL;
        }
 
-       kvm_pmu_vcpu_reset(vcpu);
-
        return 0;
 }
 
index 3313dedfa5053413bae960bd44eee01439cb63df..42ccc27fb684cad8bce5c5587fa5291468ee0b92 100644 (file)
@@ -594,6 +594,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
        u64 pmcr, val;
 
+       /* No PMU available, PMCR_EL0 may UNDEF... */
+       if (!kvm_arm_support_pmu_v3())
+               return;
+
        pmcr = read_sysreg(pmcr_el0);
        /*
         * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
@@ -919,7 +923,7 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
 #define reg_to_encoding(x)                                             \
        sys_reg((u32)(x)->Op0, (u32)(x)->Op1,                           \
-               (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2);
+               (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
 
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)                                     \
index d8cc51bd60bf22a7769de23536fca461413f7bb9..70fcd6a12fe1f1f1f7830e0f4370aff3b9ce98c4 100644 (file)
@@ -34,17 +34,16 @@ static u64 __early_kern_hyp_va(u64 addr)
 }
 
 /*
- * Store a hyp VA <-> PA offset into a hyp-owned variable.
+ * Store a hyp VA <-> PA offset into an EL2-owned variable.
  */
 static void init_hyp_physvirt_offset(void)
 {
-       extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
        u64 kern_va, hyp_va;
 
        /* Compute the offset from the hyp VA and PA of a random symbol. */
-       kern_va = (u64)kvm_ksym_ref(__hyp_text_start);
+       kern_va = (u64)lm_alias(__hyp_text_start);
        hyp_va = __early_kern_hyp_va(kern_va);
-       CHOOSE_NVHE_SYM(hyp_physvirt_offset) = (s64)__pa(kern_va) - (s64)hyp_va;
+       hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
 }
 
 /*
index 32e32d67a127f0e7a3b59a4e5b36d1c9c124d7b9..052917deb14951575f1f8a78e68e6d1a3e61cc4c 100644 (file)
@@ -419,7 +419,8 @@ int vgic_lazy_init(struct kvm *kvm)
  * Map the MMIO regions depending on the VGIC model exposed to the guest
  * called on the first VCPU run.
  * Also map the virtual CPU interface into the VM.
- * v2/v3 derivatives call vgic_init if not already done.
+ * v2 calls vgic_init() if not already done.
+ * v3 and derivatives return an error if the VGIC is not initialized.
  * vgic_ready() returns true if this function has succeeded.
  * @kvm: kvm struct pointer
  */
@@ -428,7 +429,13 @@ int kvm_vgic_map_resources(struct kvm *kvm)
        struct vgic_dist *dist = &kvm->arch.vgic;
        int ret = 0;
 
+       if (likely(vgic_ready(kvm)))
+               return 0;
+
        mutex_lock(&kvm->lock);
+       if (vgic_ready(kvm))
+               goto out;
+
        if (!irqchip_in_kernel(kvm))
                goto out;
 
@@ -439,6 +446,8 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 
        if (ret)
                __kvm_vgic_destroy(kvm);
+       else
+               dist->ready = true;
 
 out:
        mutex_unlock(&kvm->lock);
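
Note: kvm_vgic_map_resources() now follows the classic double-checked initialisation shape: an unlocked vgic_ready() fast path, a re-check under kvm->lock so racing vCPUs cannot initialise twice, and dist->ready published only once setup has fully succeeded. A userspace sketch of the pattern, assuming a hypothetical setup step (the kernel version additionally leans on the ordering guarantees behind vgic_ready()):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool ready;

static int do_setup(void) { return 0; }         /* 0 on success */

static int map_resources(void)
{
        int ret = 0;

        if (ready)                      /* unlocked fast path */
                return 0;

        pthread_mutex_lock(&lock);
        if (ready)                      /* lost the race: already done */
                goto out;

        ret = do_setup();
        if (ret == 0)
                ready = true;           /* publish only after success */
out:
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        printf("%d %d\n", map_resources(), map_resources());   /* 0 0 */
        return 0;
}
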
index ebf53a4e129630fef87b018021eb5f393baca16a..11934c2af2f42711feddab663edb4e9fc2bdca02 100644 (file)
@@ -306,20 +306,15 @@ int vgic_v2_map_resources(struct kvm *kvm)
        struct vgic_dist *dist = &kvm->arch.vgic;
        int ret = 0;
 
-       if (vgic_ready(kvm))
-               goto out;
-
        if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
            IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
                kvm_err("Need to set vgic cpu and dist addresses first\n");
-               ret = -ENXIO;
-               goto out;
+               return -ENXIO;
        }
 
        if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
                kvm_err("VGIC CPU and dist frames overlap\n");
-               ret = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        /*
@@ -329,13 +324,13 @@ int vgic_v2_map_resources(struct kvm *kvm)
        ret = vgic_init(kvm);
        if (ret) {
                kvm_err("Unable to initialize VGIC dynamic data structures\n");
-               goto out;
+               return ret;
        }
 
        ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
        if (ret) {
                kvm_err("Unable to register VGIC MMIO regions\n");
-               goto out;
+               return ret;
        }
 
        if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
@@ -344,14 +339,11 @@ int vgic_v2_map_resources(struct kvm *kvm)
                                            KVM_VGIC_V2_CPU_SIZE, true);
                if (ret) {
                        kvm_err("Unable to remap VGIC CPU to VCPU\n");
-                       goto out;
+                       return ret;
                }
        }
 
-       dist->ready = true;
-
-out:
-       return ret;
+       return 0;
 }
 
 DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);
index 9cdf39a94a635697257f15b727e8b2886547f15c..52915b34235143a3248ec2f958f9a6dc7c486f2c 100644 (file)
@@ -500,29 +500,23 @@ int vgic_v3_map_resources(struct kvm *kvm)
        int ret = 0;
        int c;
 
-       if (vgic_ready(kvm))
-               goto out;
-
        kvm_for_each_vcpu(c, vcpu, kvm) {
                struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
                if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
                        kvm_debug("vcpu %d redistributor base not set\n", c);
-                       ret = -ENXIO;
-                       goto out;
+                       return -ENXIO;
                }
        }
 
        if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
                kvm_err("Need to set vgic distributor addresses first\n");
-               ret = -ENXIO;
-               goto out;
+               return -ENXIO;
        }
 
        if (!vgic_v3_check_base(kvm)) {
                kvm_err("VGIC redist and dist frames overlap\n");
-               ret = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        /*
@@ -530,22 +524,19 @@ int vgic_v3_map_resources(struct kvm *kvm)
         * the VGIC before we need to use it.
         */
        if (!vgic_initialized(kvm)) {
-               ret = -EBUSY;
-               goto out;
+               return -EBUSY;
        }
 
        ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
        if (ret) {
                kvm_err("Unable to register VGICv3 dist MMIO regions\n");
-               goto out;
+               return ret;
        }
 
        if (kvm_vgic_global_state.has_gicv4_1)
                vgic_v4_configure_vsgis(kvm);
-       dist->ready = true;
 
-out:
-       return ret;
+       return 0;
 }
 
 DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);
index 75addb36354aa96f5224be6c8382e3f0ce6dd693..709d98fea90cc1f8b08c85864fbaa6fd6a0b7d42 100644 (file)
@@ -53,13 +53,13 @@ s64 memstart_addr __ro_after_init = -1;
 EXPORT_SYMBOL(memstart_addr);
 
 /*
- * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
- * memory as some devices, namely the Raspberry Pi 4, have peripherals with
- * this limited view of the memory. ZONE_DMA32 will cover the rest of the 32
- * bit addressable memory area.
+ * If the corresponding config options are enabled, we create both ZONE_DMA
+ * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
+ * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
+ * In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
+ * otherwise it is empty.
  */
 phys_addr_t arm64_dma_phys_limit __ro_after_init;
-static phys_addr_t arm64_dma32_phys_limit __ro_after_init;
 
 #ifdef CONFIG_KEXEC_CORE
 /*
@@ -84,7 +84,7 @@ static void __init reserve_crashkernel(void)
 
        if (crash_base == 0) {
                /* Current arm64 boot protocol requires 2MB alignment */
-               crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
+               crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
                                crash_size, SZ_2M);
                if (crash_base == 0) {
                        pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
@@ -196,6 +196,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
        unsigned long max_zone_pfns[MAX_NR_ZONES]  = {0};
        unsigned int __maybe_unused acpi_zone_dma_bits;
        unsigned int __maybe_unused dt_zone_dma_bits;
+       phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);
 
 #ifdef CONFIG_ZONE_DMA
        acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
@@ -205,8 +206,12 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
        max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
 #endif
 #ifdef CONFIG_ZONE_DMA32
-       max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
+       max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
+       if (!arm64_dma_phys_limit)
+               arm64_dma_phys_limit = dma32_phys_limit;
 #endif
+       if (!arm64_dma_phys_limit)
+               arm64_dma_phys_limit = PHYS_MASK + 1;
        max_zone_pfns[ZONE_NORMAL] = max;
 
        free_area_init(max_zone_pfns);
@@ -394,16 +399,9 @@ void __init arm64_memblock_init(void)
 
        early_init_fdt_scan_reserved_mem();
 
-       if (IS_ENABLED(CONFIG_ZONE_DMA32))
-               arm64_dma32_phys_limit = max_zone_phys(32);
-       else
-               arm64_dma32_phys_limit = PHYS_MASK + 1;
-
        reserve_elfcorehdr();
 
        high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
-
-       dma_contiguous_reserve(arm64_dma32_phys_limit);
 }
 
 void __init bootmem_init(void)
@@ -438,6 +436,11 @@ void __init bootmem_init(void)
        sparse_init();
        zone_sizes_init(min, max);
 
+       /*
+        * Reserve the CMA area after arm64_dma_phys_limit was initialised.
+        */
+       dma_contiguous_reserve(arm64_dma_phys_limit);
+
        /*
         * request_standard_resources() depends on crashkernel's memory being
         * reserved, so do it here.
@@ -455,7 +458,7 @@ void __init bootmem_init(void)
 void __init mem_init(void)
 {
        if (swiotlb_force == SWIOTLB_FORCE ||
-           max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
+           max_pfn > PFN_DOWN(arm64_dma_phys_limit))
                swiotlb_init(1);
        else
                swiotlb_force = SWIOTLB_NO_FORCE;
index 37a54b57178a7aa44b8050cd552e34df5730509a..1f7ee8c8b7b811d5537ca91b66e5822320de10b9 100644 (file)
@@ -46,7 +46,7 @@
 #endif
 
 #ifdef CONFIG_KASAN_HW_TAGS
-#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1
+#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
 #else
 #define TCR_KASAN_HW_FLAGS 0
 #endif
index 93372255984dce5c1578a735f9575dc6c18e75d9..cc24bb8e539fd8f0103faf0e6b1cee8b349616de 100644 (file)
@@ -2,7 +2,6 @@
 generic-y += asm-offsets.h
 generic-y += gpio.h
 generic-y += kvm_para.h
-generic-y += local64.h
 generic-y += qrwlock.h
 generic-y += user.h
 generic-y += vmlinux.lds.h
index ddf04f32b5467566868294d073046a3c044d9d8c..60ee7f0d60a8ff83c9b20aace40b58f194cf4e71 100644 (file)
@@ -2,7 +2,6 @@
 generic-y += asm-offsets.h
 generic-y += extable.h
 generic-y += kvm_para.h
-generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += parport.h
 generic-y += spinlock.h
index 373964bb177e41421168ae7a0ec532eae9a197d7..3ece3c93fe086e73c2a813ee132fd877b439f23e 100644 (file)
@@ -2,5 +2,4 @@
 generic-y += extable.h
 generic-y += iomap.h
 generic-y += kvm_para.h
-generic-y += local64.h
 generic-y += mcs_spinlock.h
diff --git a/arch/ia64/include/asm/local64.h b/arch/ia64/include/asm/local64.h
deleted file mode 100644 (file)
index 36c93b5..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
index dd8c166ffd7b5ffbf56e3217a209512745d7e65b..42ed5248fae9876875a71f5b66c40a0a884c8749 100644 (file)
@@ -3,6 +3,7 @@
 #define _ASM_IA64_SPARSEMEM_H
 
 #ifdef CONFIG_SPARSEMEM
+#include <asm/page.h>
 /*
  * SECTION_SIZE_BITS            2^N: how big each section will be
  * MAX_PHYSMEM_BITS             2^N: how much memory we can have in that space
index 9b5acf8fb092c42ae93bbce929a91c2a5b0ccabb..e76386a3479ea273b01f39b3900870c7b92e1e00 100644 (file)
@@ -536,7 +536,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
 
        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
-                                args->nid, args->zone, page_to_pfn(map_start),
+                                args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end),
                                 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
        return 0;
 }
@@ -546,7 +546,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
             unsigned long start_pfn)
 {
        if (!vmem_map) {
-               memmap_init_zone(size, nid, zone, start_pfn,
+               memmap_init_zone(size, nid, zone, start_pfn, start_pfn + size,
                                 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
        } else {
                struct page *start;
index 1bff55aa2d54e2ce8dd312da3c7f8d426a78540f..0dbf9c5c6faeb30eeb38bea52ab7fade99bbd44a 100644 (file)
@@ -2,6 +2,5 @@
 generated-y += syscall_table.h
 generic-y += extable.h
 generic-y += kvm_para.h
-generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += spinlock.h
index 63bce836b9f10f512dc285c7df29d59aba125359..29b0e557aa7c5b6f741f4d82f0cda0b467f5ee2d 100644 (file)
@@ -2,7 +2,6 @@
 generated-y += syscall_table.h
 generic-y += extable.h
 generic-y += kvm_para.h
-generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += parport.h
 generic-y += syscalls.h
index c61c641674e6b4765495cf4cc9751af7b54d539d..e3946b06e840a6b2e6c5a297e81f16db50a4c914 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/libfdt.h>
 
 #include <asm/addrspace.h>
+#include <asm/unaligned.h>
 
 /*
  * These two variables specify the free mem region
@@ -117,7 +118,7 @@ void decompress_kernel(unsigned long boot_heap_start)
                dtb_size = fdt_totalsize((void *)&__appended_dtb);
 
                /* last four bytes is always image size in little endian */
-               image_size = le32_to_cpup((void *)&__image_end - 4);
+               image_size = get_unaligned_le32((void *)&__image_end - 4);
 
                /* copy dtb to where the booted kernel will expect it */
                memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size,
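
Note: the decompressor fix above matters because __image_end - 4 is not guaranteed to be 4-byte aligned, and le32_to_cpup() performs a full-width load; get_unaligned_le32() assembles the value byte by byte instead. A small sketch of the alignment-safe load:

#include <stdint.h>
#include <stdio.h>

/* Byte-wise little-endian read: legal at any alignment, unlike
 * dereferencing a misaligned uint32_t pointer. */
static uint32_t load_le32(const void *p)
{
        const uint8_t *b = p;

        return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
               (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
        /* 7-byte buffer: the "last four bytes" start at offset 3,
         * a deliberately misaligned address. */
        uint8_t image[7] = { 0, 0, 0, 0x78, 0x56, 0x34, 0x12 };

        printf("0x%x\n", load_le32(image + sizeof(image) - 4)); /* 0x12345678 */
        return 0;
}
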
index bd47e15d02c73ea599e896a197500896ee7c174a..be5d4afcd30f930e6eb828d55a05099be8402a5d 100644 (file)
@@ -1444,7 +1444,7 @@ static void octeon_irq_setup_secondary_ciu2(void)
 static int __init octeon_irq_init_ciu(
        struct device_node *ciu_node, struct device_node *parent)
 {
-       unsigned int i, r;
+       int i, r;
        struct irq_chip *chip;
        struct irq_chip *chip_edge;
        struct irq_chip *chip_mbox;
index 198b3bafdac978c505126557592c5c70f45b5bfd..95b4fa7bd0d1fd92abf55f85f4d8a35379359416 100644 (file)
@@ -6,7 +6,6 @@ generated-y += syscall_table_64_n64.h
 generated-y += syscall_table_64_o32.h
 generic-y += export.h
 generic-y += kvm_para.h
-generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += parport.h
 generic-y += qrwlock.h
index 6ee3f7218c675b6c605d06776ef1164ef6d13441..c4441416e96b6a28c91e884a9594f4b3dbb5efd9 100644 (file)
@@ -103,4 +103,11 @@ jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
 #undef ns_to_kernel_old_timeval
 #define ns_to_kernel_old_timeval ns_to_old_timeval32
 
+/*
+ * Some data types as stored in coredump.
+ */
+#define user_long_t             compat_long_t
+#define user_siginfo_t          compat_siginfo_t
+#define copy_siginfo_to_external        copy_siginfo_to_external32
+
 #include "../../../fs/binfmt_elf.c"
index 6dd103d3cebba2f299fa60489b65c9377f975ee4..7b2a23f48c1ac227f6bad0eb4eadb068c28017ea 100644 (file)
@@ -106,4 +106,11 @@ jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
 #undef ns_to_kernel_old_timeval
 #define ns_to_kernel_old_timeval ns_to_old_timeval32
 
+/*
+ * Some data types as stored in coredump.
+ */
+#define user_long_t             compat_long_t
+#define user_siginfo_t          compat_siginfo_t
+#define copy_siginfo_to_external        copy_siginfo_to_external32
+
 #include "../../../fs/binfmt_elf.c"
index 47aeb3350a76029edaf96a03d29d8ce975058232..0e365b7c742d93b0a0c1ce06fdbcb0aa8b180dff 100644 (file)
@@ -187,8 +187,14 @@ static int __init relocate_exception_table(long offset)
 static inline __init unsigned long rotate_xor(unsigned long hash,
                                              const void *area, size_t size)
 {
-       size_t i;
-       unsigned long *ptr = (unsigned long *)area;
+       const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));
+       size_t diff, i;
+
+       diff = (void *)ptr - area;
+       if (unlikely(size < diff + sizeof(hash)))
+               return hash;
+
+       size = ALIGN_DOWN(size - diff, sizeof(hash));
 
        for (i = 0; i < size / sizeof(hash); i++) {
                /* Rotate by odd number of bits and XOR. */
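
Note: the rotate_xor() fix above stops the seed hashing from reading unaligned (and potentially out-of-bounds) words: it aligns the cursor up to a word boundary, bails out when less than one whole word remains, and rounds the byte count down to whole words. A userspace re-creation of the fixed logic; the rotate amount is illustrative, since the hunk truncates before the loop body:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(p, a)   (((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((size_t)(a) - 1))

static unsigned long rotate_xor(unsigned long hash, const void *area,
                                size_t size)
{
        const unsigned long *ptr =
                (const unsigned long *)ALIGN_UP(area, sizeof(hash));
        size_t diff = (const char *)ptr - (const char *)area;

        if (size < diff + sizeof(hash))         /* not even one aligned word */
                return hash;

        size = ALIGN_DOWN(size - diff, sizeof(hash));

        for (size_t i = 0; i < size / sizeof(hash); i++) {
                /* Rotate by an odd number of bits and XOR. */
                hash = (hash << 27) | (hash >> (8 * sizeof(hash) - 27));
                hash ^= ptr[i];
        }
        return hash;
}

int main(void)
{
        char buf[64] = "some boot-time entropy";

        /* Start one byte in so the alignment handling actually runs. */
        printf("%lx\n", rotate_xor(0x12345678UL, buf + 1, sizeof(buf) - 1));
        return 0;
}
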
index ff1e94299317dd520337808c211d90b0fa81c09f..82a4453c9c2d52fb1de045f5770bdc3fd5921f75 100644 (file)
@@ -4,6 +4,5 @@ generic-y += cmpxchg.h
 generic-y += export.h
 generic-y += gpio.h
 generic-y += kvm_para.h
-generic-y += local64.h
 generic-y += parport.h
 generic-y += user.h
index 442f3d3bcd9043671316820230a8c6076ca58ead..ca5987e110538c353fcc5b8822e7a3569b5ad5bf 100644 (file)
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 generic-y += extable.h
 generic-y += kvm_para.h
-generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += qspinlock_types.h
 generic-y += qspinlock.h
index f16c4db8011627309331796a9767019759a6b2fd..4406475a23040fd77cf1e9ea3c02f29a5d463dae 100644 (file)
@@ -3,6 +3,5 @@ generated-y += syscall_table_32.h
 generated-y += syscall_table_64.h
 generated-y += syscall_table_c32.h
 generic-y += kvm_para.h
-generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += user.h
index 90cd5c53af66646153d69ce620f2393a7b6039f3..e1f9b4ea1c537be14ea09ad77d84467ea4fa5802 100644 (file)
@@ -5,7 +5,6 @@ generated-y += syscall_table_c32.h
 generated-y += syscall_table_spu.h
 generic-y += export.h
 generic-y += kvm_types.h
-generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += qrwlock.h
 generic-y += vtime.h
index 81671aa365b34d39dde25f31b69f19d211458354..77c635c2c90d43f2979006e280ba8e1071ea5b41 100644 (file)
@@ -103,6 +103,8 @@ int gettimeofday_fallback(struct __kernel_old_timeval *_tv, struct timezone *_tz
        return do_syscall_2(__NR_gettimeofday, (unsigned long)_tv, (unsigned long)_tz);
 }
 
+#ifdef __powerpc64__
+
 static __always_inline
 int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
 {
@@ -115,10 +117,22 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
        return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts);
 }
 
-#ifdef CONFIG_VDSO32
+#else
 
 #define BUILD_VDSO32           1
 
+static __always_inline
+int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+       return do_syscall_2(__NR_clock_gettime64, _clkid, (unsigned long)_ts);
+}
+
+static __always_inline
+int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+       return do_syscall_2(__NR_clock_getres_time64, _clkid, (unsigned long)_ts);
+}
+
 static __always_inline
 int clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
 {
index 349bf3f0c3afa994c5cc0c0ccd88ad4c1df7c8ac..858fbc8b19f325a1f28db35378f3f5f7175f4763 100644 (file)
@@ -260,10 +260,19 @@ __secondary_hold_acknowledge:
 MachineCheck:
        EXCEPTION_PROLOG_0
 #ifdef CONFIG_PPC_CHRP
+#ifdef CONFIG_VMAP_STACK
+       mtspr   SPRN_SPRG_SCRATCH2,r1
+       mfspr   r1, SPRN_SPRG_THREAD
+       lwz     r1, RTAS_SP(r1)
+       cmpwi   cr1, r1, 0
+       bne     cr1, 7f
+       mfspr   r1, SPRN_SPRG_SCRATCH2
+#else
        mfspr   r11, SPRN_SPRG_THREAD
        lwz     r11, RTAS_SP(r11)
        cmpwi   cr1, r11, 0
        bne     cr1, 7f
+#endif
 #endif /* CONFIG_PPC_CHRP */
        EXCEPTION_PROLOG_1 for_rtas=1
 7:     EXCEPTION_PROLOG_2
index 0318ba436f34eb280986c13a6ca9f811cf06301a..4ab426b8b0e02276423241b25d2386eedc24b90e 100644 (file)
@@ -85,7 +85,7 @@ SECTIONS
                ALIGN_FUNCTION();
 #endif
                /* careful! __ftr_alt_* sections need to be close to .text */
-               *(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text);
+               *(.text.hot .text.hot.* TEXT_MAIN .text.fixup .text.unlikely .text.unlikely.* .fixup __ftr_alt_* .ref.text);
 #ifdef CONFIG_PPC64
                *(.tramp.ftrace.text);
 #endif
@@ -187,6 +187,12 @@ SECTIONS
        .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
                _sinittext = .;
                INIT_TEXT
+
+               /*
+                * .init.text might be RO so we must ensure this section ends on
+                * a page boundary.
+                */
+               . = ALIGN(PAGE_SIZE);
                _einittext = .;
 #ifdef CONFIG_PPC64
                *(.tramp.ftrace.init);
@@ -200,6 +206,8 @@ SECTIONS
                EXIT_TEXT
        }
 
+       . = ALIGN(PAGE_SIZE);
+
        INIT_DATA_SECTION(16)
 
        . = ALIGN(8);
index 81b76d44725d7c50d3cdf2f406705b03ba5d5b56..e9e2c1f0a690594389a2d034a56f59f8a376a89f 100644 (file)
@@ -137,7 +137,7 @@ config PA_BITS
 
 config PAGE_OFFSET
        hex
-       default 0xC0000000 if 32BIT && MAXPHYSMEM_2GB
+       default 0xC0000000 if 32BIT && MAXPHYSMEM_1GB
        default 0x80000000 if 64BIT && !MMU
        default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
        default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
@@ -247,10 +247,12 @@ config MODULE_SECTIONS
 
 choice
        prompt "Maximum Physical Memory"
-       default MAXPHYSMEM_2GB if 32BIT
+       default MAXPHYSMEM_1GB if 32BIT
        default MAXPHYSMEM_2GB if 64BIT && CMODEL_MEDLOW
        default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY
 
+       config MAXPHYSMEM_1GB
+               bool "1GiB"
        config MAXPHYSMEM_2GB
                bool "2GiB"
        config MAXPHYSMEM_128GB
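
These two hunks are one fix: with the rv32 kernel mapped at 0xC0000000, the linear map covers only the top quarter of the 4 GiB virtual space, so the old 2 GiB default could never be fully mapped. A back-of-the-envelope check (plain C, not kernel code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t page_offset = 0xC0000000;   /* rv32 PAGE_OFFSET */
        uint64_t va_end      = 1ULL << 32;   /* top of the 32-bit VA space */

        /* 0x100000000 - 0xC0000000 = 0x40000000 = 1 GiB of linear map,
         * hence the new MAXPHYSMEM_1GB default for 32BIT. */
        assert(va_end - page_offset == (1ULL << 30));
        return 0;
}
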
index 4a2729f5ca3f0113be3e02e72f9c427fe02207cc..24d75a146e02d441c167b2b5028e3303eb80f1e3 100644 (file)
@@ -88,7 +88,9 @@
        phy-mode = "gmii";
        phy-handle = <&phy0>;
        phy0: ethernet-phy@0 {
+               compatible = "ethernet-phy-id0007.0771";
                reg = <0>;
+               reset-gpios = <&gpio 12 GPIO_ACTIVE_LOW>;
        };
 };
 
index d222d353d86d40823e249bfdfa80e4a8bd3f4045..8c3d1e4517031ac56b07c1c1c393b0d7b64cee79 100644 (file)
@@ -64,6 +64,8 @@ CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_VIRTIO=y
 CONFIG_SPI=y
 CONFIG_SPI_SIFIVE=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SIFIVE=y
 # CONFIG_PTP_1588_CLOCK is not set
 CONFIG_POWER_RESET=y
 CONFIG_DRM=y
index 59dd7be550054fb785029616074579ab32dd8835..445ccc97305a5ed8d7f395f2df1c932b8397dad6 100644 (file)
@@ -3,6 +3,5 @@ generic-y += early_ioremap.h
 generic-y += extable.h
 generic-y += flat.h
 generic-y += kvm_para.h
-generic-y += local64.h
 generic-y += user.h
 generic-y += vmlinux.lds.h
index 41a72861987ccc9a0406b4926bd21e8e2c728ebb..251e1db088fa2d8508762bcc5e459f2e7b120684 100644 (file)
@@ -99,7 +99,6 @@
                                | _PAGE_DIRTY)
 
 #define PAGE_KERNEL            __pgprot(_PAGE_KERNEL)
-#define PAGE_KERNEL_EXEC       __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
 #define PAGE_KERNEL_READ       __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
 #define PAGE_KERNEL_EXEC       __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
 #define PAGE_KERNEL_READ_EXEC  __pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
index 8454f746bbfd0f3ed6712537847deb332d0e32d0..1453a2f563bcc8180101763585e776c0abe5e426 100644 (file)
@@ -10,7 +10,7 @@
 
 #include <linux/types.h>
 
-#ifndef GENERIC_TIME_VSYSCALL
+#ifndef CONFIG_GENERIC_TIME_VSYSCALL
 struct vdso_data {
 };
 #endif
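
The bug pattern here is easy to reintroduce: Kconfig symbols reach C only as CONFIG_-prefixed macros (emitted into include/generated/autoconf.h and pulled in via linux/kconfig.h), so a guard on the bare name never fires and the #ifndef branch compiles unconditionally. Schematically:

/* What Kconfig actually emits into autoconf.h: */
#define CONFIG_GENERIC_TIME_VSYSCALL 1

#ifndef GENERIC_TIME_VSYSCALL          /* bare name: never defined, always true */
/* ...so the empty struct vdso_data was always compiled in... */
#endif

#ifndef CONFIG_GENERIC_TIME_VSYSCALL   /* prefixed name: the intended test */
/* ...this branch only compiles when the option is really off. */
#endif
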
index de59dd457b41584083dee7866e189bbb396945dd..d8678135704425973c3ba11d058dd56babef2481 100644 (file)
@@ -26,7 +26,16 @@ cache_get_priv_group(struct cacheinfo *this_leaf)
 
 static struct cacheinfo *get_cacheinfo(u32 level, enum cache_type type)
 {
-       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(smp_processor_id());
+       /*
+        * Using raw_smp_processor_id() elides a preemptability check, but this
+        * is really indicative of a larger problem: the cacheinfo UABI assumes
+        * that cores have a homogeneous view of the cache hierarchy.  That
+        * happens to be the case for the current set of RISC-V systems, but
+        * likely won't be true in general.  Since there's no way to provide
+        * correct information for these systems via the current UABI, we're
+        * just eliding the check for now.
+        */
+       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(raw_smp_processor_id());
        struct cacheinfo *this_leaf;
        int index;
 
index 524d918f3601b2f73b8d4cf88a1936ad507b5f15..744f3209c48d0b30d0e67f7b00f136b75d5e0fd0 100644 (file)
@@ -124,15 +124,15 @@ skip_context_tracking:
        REG_L a1, (a1)
        jr a1
 1:
-#ifdef CONFIG_TRACE_IRQFLAGS
-       call trace_hardirqs_on
-#endif
        /*
         * Exceptions run with interrupts enabled or disabled depending on the
         * state of SR_PIE in m/sstatus.
         */
        andi t0, s1, SR_PIE
        beqz t0, 1f
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call trace_hardirqs_on
+#endif
        csrs CSR_STATUS, SR_IE
 
 1:
@@ -155,6 +155,15 @@ skip_context_tracking:
        tail do_trap_unknown
 
 handle_syscall:
+#ifdef CONFIG_RISCV_M_MODE
+       /*
+        * When running in M-Mode (no MMU config), MPIE does not get set.
+        * As a result, we need to force-enable interrupts here because
+        * handle_exception did not set SR_IE as it always sees SR_PIE
+        * being cleared.
+        */
+       csrs CSR_STATUS, SR_IE
+#endif
 #if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
        /* Recover a0 - a7 for system calls */
        REG_L a0, PT_A0(sp)
@@ -186,14 +195,7 @@ check_syscall_nr:
         * Syscall number held in a7.
         * If syscall number is above allowed value, redirect to ni_syscall.
         */
-       bge a7, t0, 1f
-       /*
-        * Check if syscall is rejected by tracer, i.e., a7 == -1.
-        * If yes, we pretend it was executed.
-        */
-       li t1, -1
-       beq a7, t1, ret_from_syscall_rejected
-       blt a7, t1, 1f
+       bgeu a7, t0, 1f
        /* Call syscall */
        la s0, sys_call_table
        slli t0, a7, RISCV_LGPTR
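
bge is a signed compare, so a syscall number of -1 (the value a tracer writes to reject a call) passed the upper-bound test and needed the dedicated -1 handling deleted above; bgeu compares unsigned, turning -1 into ULONG_MAX so a single branch rejects it along with everything past the table. The same trick in C:

#include <assert.h>
#include <stdbool.h>

#define NR_SYSCALLS 448L   /* hypothetical table size */

/* Signed bound check: -1 < NR_SYSCALLS, so a rejected syscall would
 * fall through to an out-of-bounds table lookup. */
static bool in_range_signed(long nr)   { return nr < NR_SYSCALLS; }

/* Unsigned bound check: (unsigned long)-1 is the largest value, so one
 * compare rejects -1 and everything past the end of the table. */
static bool in_range_unsigned(long nr) { return (unsigned long)nr < NR_SYSCALLS; }

int main(void)
{
        assert(in_range_signed(-1));    /* the hazard */
        assert(!in_range_unsigned(-1)); /* the fix */
        return 0;
}
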
index 1d85e9bf783cf907ba1e0ef17a0d529c1f9f5cb6..3fa3f26dde85664a6ccd993d9ee3bdcdf118eee3 100644 (file)
@@ -127,7 +127,9 @@ static void __init init_resources(void)
 {
        struct memblock_region *region = NULL;
        struct resource *res = NULL;
-       int ret = 0;
+       struct resource *mem_res = NULL;
+       size_t mem_res_sz = 0;
+       int ret = 0, i = 0;
 
        code_res.start = __pa_symbol(_text);
        code_res.end = __pa_symbol(_etext) - 1;
@@ -145,16 +147,17 @@ static void __init init_resources(void)
        bss_res.end = __pa_symbol(__bss_stop) - 1;
        bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
+       mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt) * sizeof(*mem_res);
+       mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES);
+       if (!mem_res)
+               panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz);
        /*
         * Start by adding the reserved regions; if they overlap
         * with /memory regions, insert_resource() later on will take
         * care of it.
         */
        for_each_reserved_mem_region(region) {
-               res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
-               if (!res)
-                       panic("%s: Failed to allocate %zu bytes\n", __func__,
-                             sizeof(struct resource));
+               res = &mem_res[i++];
 
                res->name = "Reserved";
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
@@ -171,8 +174,10 @@ static void __init init_resources(void)
                 * Ignore any other reserved regions within
                 * system memory.
                 */
-               if (memblock_is_memory(res->start))
+               if (memblock_is_memory(res->start)) {
+                       memblock_free((phys_addr_t) res, sizeof(struct resource));
                        continue;
+               }
 
                ret = add_resource(&iomem_resource, res);
                if (ret < 0)
@@ -181,10 +186,7 @@ static void __init init_resources(void)
 
        /* Add /memory regions to the resource tree */
        for_each_mem_region(region) {
-               res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
-               if (!res)
-                       panic("%s: Failed to allocate %zu bytes\n", __func__,
-                             sizeof(struct resource));
+               res = &mem_res[i++];
 
                if (unlikely(memblock_is_nomap(region))) {
                        res->name = "Reserved";
@@ -205,9 +207,9 @@ static void __init init_resources(void)
        return;
 
  error:
-       memblock_free((phys_addr_t) res, sizeof(struct resource));
        /* Better an empty resource tree than an inconsistent one */
        release_child_resources(&iomem_resource);
+       memblock_free((phys_addr_t) mem_res, mem_res_sz);
 }
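
The rework replaces one memblock_alloc() per region -- each a separate failure point, each leaked individually on the error path -- with a single array sized from memblock.memory.cnt + memblock.reserved.cnt, handed out entry by entry and released in one call if the tree turns out inconsistent. The shape of the change as a userspace sketch (malloc standing in for memblock_alloc, insert_one() hypothetical):

#include <stdlib.h>

struct resource { const char *name; };

static int insert_one(struct resource *res) { (void)res; return 0; }

static int build_tree(size_t nr_regions)
{
        /* One up-front allocation instead of nr_regions small ones. */
        struct resource *pool = calloc(nr_regions, sizeof(*pool));
        size_t i = 0;

        if (!pool)
                return -1;

        for (size_t r = 0; r < nr_regions; r++) {
                struct resource *res = &pool[i++];      /* hand out an entry */

                res->name = "Reserved";
                if (insert_one(res))
                        goto error;
        }
        return 0;

error:
        free(pool);     /* one bulk release, no per-entry bookkeeping */
        return -1;
}
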
 
 
index 48b870a685b3092f863474539ffd7ecfd00a0ffe..df5d2da7c40be3fa9ec41e464911277b98321db3 100644 (file)
@@ -14,7 +14,7 @@
 
 #include <asm/stacktrace.h>
 
-register unsigned long sp_in_global __asm__("sp");
+register const unsigned long sp_in_global __asm__("sp");
 
 #ifdef CONFIG_FRAME_POINTER
 
@@ -28,9 +28,8 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                sp = user_stack_pointer(regs);
                pc = instruction_pointer(regs);
        } else if (task == NULL || task == current) {
-               const register unsigned long current_sp = sp_in_global;
                fp = (unsigned long)__builtin_frame_address(0);
-               sp = current_sp;
+               sp = sp_in_global;
                pc = (unsigned long)walk_stackframe;
        } else {
                /* task blocked in __switch_to */
index 4d3a1048ad8b1cbc690c75321933e9057a2b69b6..8a5cf99c07762403ebd55ba6f54e03005e28b7e4 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright (C) 2017 SiFive
  */
 
+#include <linux/of_clk.h>
 #include <linux/clocksource.h>
 #include <linux/delay.h>
 #include <asm/sbi.h>
@@ -24,6 +25,8 @@ void __init time_init(void)
        riscv_timebase = prop;
 
        lpj_fine = riscv_timebase / HZ;
+
+       of_clk_init(NULL);
        timer_probe();
 }
 
index 678204231700cad475be9334ab77cd46cefc6fdb..3f1d35e7c98a62d9b3113999e45938e2b97dfbab 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/binfmts.h>
 #include <linux/err.h>
 #include <asm/page.h>
-#ifdef GENERIC_TIME_VSYSCALL
+#ifdef CONFIG_GENERIC_TIME_VSYSCALL
 #include <vdso/datapage.h>
 #else
 #include <asm/vdso.h>
index bf5379135e39b628619de135632876d5338abe9d..7cd4993f4ff21a932b6ef29512f685ea4e947f9a 100644 (file)
@@ -157,9 +157,10 @@ disable:
 void __init setup_bootmem(void)
 {
        phys_addr_t mem_start = 0;
-       phys_addr_t start, end = 0;
+       phys_addr_t start, dram_end, end = 0;
        phys_addr_t vmlinux_end = __pa_symbol(&_end);
        phys_addr_t vmlinux_start = __pa_symbol(&_start);
+       phys_addr_t max_mapped_addr = __pa(~(ulong)0);
        u64 i;
 
        /* Find the memory region containing the kernel */
@@ -181,7 +182,18 @@ void __init setup_bootmem(void)
        /* Reserve from the start of the kernel to the end of the kernel */
        memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
 
-       max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+       dram_end = memblock_end_of_DRAM();
+
+       /*
+        * The memblock allocator is not aware that the last 4K bytes of
+        * addressable memory cannot be mapped because of the IS_ERR_VALUE
+        * macro. Make sure the last 4K bytes are not usable by memblock
+        * if the end of DRAM is equal to the maximum addressable memory.
+        */
+       if (max_mapped_addr == (dram_end - 1))
+               memblock_set_current_limit(max_mapped_addr - 4096);
+
+       max_pfn = PFN_DOWN(dram_end);
        max_low_pfn = max_pfn;
        dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
        set_max_mapnr(max_low_pfn);
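
The constraint described in the new comment comes from the kernel's pointer-or-errno convention in include/linux/err.h: any value in the last 4095 bytes of the address space decodes as an error code, so a page linear-mapped at the very top of VA would make valid pointers test as errors:

#define MAX_ERRNO       4095

#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)

Clamping memblock's limit 4096 bytes below the top when DRAM reaches the end of the address space keeps that final page out of circulation.
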
index 12ddd1f6bf70c8e96ac92b02e0b37c8e22c51043..a8a2ffd9114aaa22c60661c7cd2d80023747cb2e 100644 (file)
@@ -93,8 +93,8 @@ void __init kasan_init(void)
                                                                VMALLOC_END));
 
        for_each_mem_range(i, &_start, &_end) {
-               void *start = (void *)_start;
-               void *end = (void *)_end;
+               void *start = (void *)__va(_start);
+               void *end = (void *)__va(_end);
 
                if (start >= end)
                        break;
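
for_each_mem_range() yields physical addresses, while shadow population does virtual-address arithmetic; without the __va() conversion the generic KASAN translation would be applied to physical addresses and produce garbage shadow pointers. A sketch of the relationship, assuming the generic KASAN layout (one shadow byte per 8 bytes of memory; pa_start/pa_end hypothetical):

/* Generic KASAN shadow translation -- valid for virtual addresses only:
 *
 *   shadow(addr) = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * with KASAN_SHADOW_SCALE_SHIFT == 3, so each range must first be moved
 * into the linear map: */
void *start = (void *)__va(pa_start);   /* phys -> linear-map virtual */
void *end   = (void *)__va(pa_end);
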
index e84bdd15150bf51bac297f5ae526d137bf17a5db..c72874f09741c7c3de56becaffdaf1c8e3e6204c 100644 (file)
@@ -54,17 +54,23 @@ config KASAN_SHADOW_OFFSET
 
 config S390
        def_bool y
+       #
+       # Note: keep this list sorted alphabetically
+       #
+       imply IMA_SECURE_AND_OR_TRUSTED_BOOT
        select ARCH_BINFMT_ELF_STATE
        select ARCH_HAS_DEBUG_VM_PGTABLE
        select ARCH_HAS_DEBUG_WX
        select ARCH_HAS_DEVMEM_IS_ALLOWED
        select ARCH_HAS_ELF_RANDOMIZE
+       select ARCH_HAS_FORCE_DMA_UNENCRYPTED
        select ARCH_HAS_FORTIFY_SOURCE
        select ARCH_HAS_GCOV_PROFILE_ALL
        select ARCH_HAS_GIGANTIC_PAGE
        select ARCH_HAS_KCOV
        select ARCH_HAS_MEM_ENCRYPT
        select ARCH_HAS_PTE_SPECIAL
+       select ARCH_HAS_SCALED_CPUTIME
        select ARCH_HAS_SET_MEMORY
        select ARCH_HAS_STRICT_KERNEL_RWX
        select ARCH_HAS_STRICT_MODULE_RWX
@@ -111,8 +117,10 @@ config S390
        select ARCH_WANT_IPC_PARSE_VERSION
        select BUILDTIME_TABLE_SORT
        select CLONE_BACKWARDS2
+       select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
        select DMA_OPS if PCI
        select DYNAMIC_FTRACE if FUNCTION_TRACER
+       select GENERIC_ALLOCATOR
        select GENERIC_CPU_AUTOPROBE
        select GENERIC_CPU_VULNERABILITIES
        select GENERIC_FIND_FIRST_BIT
@@ -126,22 +134,21 @@ config S390
        select HAVE_ARCH_JUMP_LABEL_RELATIVE
        select HAVE_ARCH_KASAN
        select HAVE_ARCH_KASAN_VMALLOC
-       select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_SOFT_DIRTY
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
        select HAVE_ARCH_VMAP_STACK
        select HAVE_ASM_MODVERSIONS
-       select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
        select HAVE_CMPXCHG_DOUBLE
        select HAVE_CMPXCHG_LOCAL
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_CONTIGUOUS
        select HAVE_DYNAMIC_FTRACE
        select HAVE_DYNAMIC_FTRACE_WITH_REGS
-       select HAVE_FAST_GUP
+       select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
+       select HAVE_FAST_GUP
        select HAVE_FENTRY
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FUNCTION_ERROR_INJECTION
@@ -163,16 +170,15 @@ config S390
        select HAVE_KRETPROBES
        select HAVE_KVM
        select HAVE_LIVEPATCH
-       select HAVE_PERF_REGS
-       select HAVE_PERF_USER_STACK_DUMP
        select HAVE_MEMBLOCK_PHYS_MAP
-       select MMU_GATHER_NO_GATHER
        select HAVE_MOD_ARCH_SPECIFIC
+       select HAVE_NMI
        select HAVE_NOP_MCOUNT
        select HAVE_OPROFILE
        select HAVE_PCI
        select HAVE_PERF_EVENTS
-       select MMU_GATHER_RCU_TABLE_FREE
+       select HAVE_PERF_REGS
+       select HAVE_PERF_USER_STACK_DUMP
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_RELIABLE_STACKTRACE
        select HAVE_RSEQ
@@ -181,6 +187,8 @@ config S390
        select HAVE_VIRT_CPU_ACCOUNTING_IDLE
        select IOMMU_HELPER             if PCI
        select IOMMU_SUPPORT            if PCI
+       select MMU_GATHER_NO_GATHER
+       select MMU_GATHER_RCU_TABLE_FREE
        select MODULES_USE_ELF_RELA
        select NEED_DMA_MAP_STATE       if PCI
        select NEED_SG_DMA_LENGTH       if PCI
@@ -190,17 +198,12 @@ config S390
        select PCI_MSI                  if PCI
        select PCI_MSI_ARCH_FALLBACKS   if PCI_MSI
        select SPARSE_IRQ
+       select SWIOTLB
        select SYSCTL_EXCEPTION_TRACE
        select THREAD_INFO_IN_TASK
        select TTY
        select VIRT_CPU_ACCOUNTING
-       select ARCH_HAS_SCALED_CPUTIME
-       select HAVE_NMI
-       select ARCH_HAS_FORCE_DMA_UNENCRYPTED
-       select SWIOTLB
-       select GENERIC_ALLOCATOR
-       imply IMA_SECURE_AND_OR_TRUSTED_BOOT
-
+       # Note: keep the above list sorted alphabetically
 
 config SCHED_OMIT_FRAME_POINTER
        def_bool y
index 1be32fcf6f2eeaf3eb59d62ec6c8da49771f53d3..c4f6ff98a612cd8ca923faa479abb8a08a7f39d0 100644 (file)
@@ -61,7 +61,9 @@ CONFIG_OPROFILE=m
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
 CONFIG_STATIC_KEYS_SELFTEST=y
+CONFIG_SECCOMP_CACHE_DEBUG=y
 CONFIG_LOCK_EVENT_COUNTS=y
+# CONFIG_GCC_PLUGINS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
 CONFIG_MODULE_UNLOAD=y
@@ -410,12 +412,12 @@ CONFIG_SCSI_ENCLOSURE=m
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SPI_ATTRS=m
-CONFIG_SCSI_FC_ATTRS=y
+CONFIG_SCSI_FC_ATTRS=m
 CONFIG_SCSI_SAS_LIBSAS=m
 CONFIG_SCSI_SRP_ATTRS=m
 CONFIG_ISCSI_TCP=m
 CONFIG_SCSI_DEBUG=m
-CONFIG_ZFCP=y
+CONFIG_ZFCP=m
 CONFIG_SCSI_VIRTIO=m
 CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
@@ -444,6 +446,7 @@ CONFIG_DM_MULTIPATH=m
 CONFIG_DM_MULTIPATH_QL=m
 CONFIG_DM_MULTIPATH_ST=m
 CONFIG_DM_MULTIPATH_HST=m
+CONFIG_DM_MULTIPATH_IOA=m
 CONFIG_DM_DELAY=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
@@ -542,7 +545,6 @@ CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 CONFIG_LEGACY_PTY_COUNT=0
-CONFIG_NULL_TTY=m
 CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_RAW_DRIVER=m
@@ -574,6 +576,7 @@ CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
 CONFIG_VHOST_NET=m
 CONFIG_VHOST_VSOCK=m
+# CONFIG_SURFACE_PLATFORMS is not set
 CONFIG_S390_CCW_IOMMU=y
 CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
@@ -655,6 +658,7 @@ CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_CIFS_SWN_UPCALL=y
 CONFIG_NLS_DEFAULT="utf8"
 CONFIG_NLS_CODEPAGE_437=m
 CONFIG_NLS_CODEPAGE_850=m
@@ -826,6 +830,8 @@ CONFIG_FTRACE_SYSCALLS=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_BPF_KPROBE_OVERRIDE=y
 CONFIG_HIST_TRIGGERS=y
+CONFIG_FTRACE_STARTUP_TEST=y
+# CONFIG_EVENT_TRACE_STARTUP_TEST is not set
 CONFIG_DEBUG_USER_ASCE=y
 CONFIG_NOTIFIER_ERROR_INJECTION=m
 CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
index e2171a0088094a299b107634df047c6e5bbb31cd..51135893cffe34ed9e6a81e7aead22308412daaa 100644 (file)
@@ -58,6 +58,7 @@ CONFIG_S390_UNWIND_SELFTEST=m
 CONFIG_OPROFILE=m
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
+# CONFIG_GCC_PLUGINS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
 CONFIG_MODULE_UNLOAD=y
@@ -95,7 +96,6 @@ CONFIG_ZSMALLOC_STAT=y
 CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
 CONFIG_IDLE_PAGE_TRACKING=y
 CONFIG_PERCPU_STATS=y
-CONFIG_GUP_TEST=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -403,12 +403,12 @@ CONFIG_SCSI_ENCLOSURE=m
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SPI_ATTRS=m
-CONFIG_SCSI_FC_ATTRS=y
+CONFIG_SCSI_FC_ATTRS=m
 CONFIG_SCSI_SAS_LIBSAS=m
 CONFIG_SCSI_SRP_ATTRS=m
 CONFIG_ISCSI_TCP=m
 CONFIG_SCSI_DEBUG=m
-CONFIG_ZFCP=y
+CONFIG_ZFCP=m
 CONFIG_SCSI_VIRTIO=m
 CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
@@ -437,6 +437,7 @@ CONFIG_DM_MULTIPATH=m
 CONFIG_DM_MULTIPATH_QL=m
 CONFIG_DM_MULTIPATH_ST=m
 CONFIG_DM_MULTIPATH_HST=m
+CONFIG_DM_MULTIPATH_IOA=m
 CONFIG_DM_DELAY=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
@@ -536,7 +537,6 @@ CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 CONFIG_LEGACY_PTY_COUNT=0
-CONFIG_NULL_TTY=m
 CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_RAW_DRIVER=m
@@ -568,6 +568,7 @@ CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
 CONFIG_VHOST_NET=m
 CONFIG_VHOST_VSOCK=m
+# CONFIG_SURFACE_PLATFORMS is not set
 CONFIG_S390_CCW_IOMMU=y
 CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
@@ -645,6 +646,7 @@ CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_CIFS_SWN_UPCALL=y
 CONFIG_NLS_DEFAULT="utf8"
 CONFIG_NLS_CODEPAGE_437=m
 CONFIG_NLS_CODEPAGE_850=m
@@ -778,6 +780,7 @@ CONFIG_FTRACE_SYSCALLS=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_BPF_KPROBE_OVERRIDE=y
 CONFIG_HIST_TRIGGERS=y
+CONFIG_DEBUG_USER_ASCE=y
 CONFIG_LKDTM=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
index a302630341effc410cbc1587663639f94b75920d..1ef211dae77a0b53a3e4e955518c72dbcd8f6e31 100644 (file)
@@ -22,6 +22,7 @@ CONFIG_CRASH_DUMP=y
 # CONFIG_VIRTUALIZATION is not set
 # CONFIG_S390_GUEST is not set
 # CONFIG_SECCOMP is not set
+# CONFIG_GCC_PLUGINS is not set
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -58,6 +59,7 @@ CONFIG_RAW_DRIVER=y
 # CONFIG_HID is not set
 # CONFIG_VIRTIO_MENU is not set
 # CONFIG_VHOST_MENU is not set
+# CONFIG_SURFACE_PLATFORMS is not set
 # CONFIG_IOMMU_SUPPORT is not set
 # CONFIG_DNOTIFY is not set
 # CONFIG_INOTIFY_USER is not set
index 319efa0e6d024fecd1e503c9d722079ce1db213e..1a18d7b82f86d742d8a8c71ac8f9c665ef95fa11 100644 (file)
@@ -7,5 +7,4 @@ generated-y += unistd_nr.h
 generic-y += asm-offsets.h
 generic-y += export.h
 generic-y += kvm_types.h
-generic-y += local64.h
 generic-y += mcs_spinlock.h
index 7435182ef84658c39fd27c5c0ef88a6c4ccf29f7..fc44d9c88b41915a7021042eb8b462517cfdbd2c 100644 (file)
@@ -1,6 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 generated-y += syscall_table.h
 generic-y += kvm_para.h
-generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += parport.h
index 5269a704801fa46621dc65c08da2e6ae357eaa92..3688fdae50e45cc6454814de2d3bdca56a4e1bb9 100644 (file)
@@ -6,5 +6,4 @@ generated-y += syscall_table_64.h
 generated-y += syscall_table_c32.h
 generic-y += export.h
 generic-y += kvm_para.h
-generic-y += local64.h
 generic-y += mcs_spinlock.h
index 7b6dd10b162ac71f4f7dd3ddbe3f8a9a5658fb16..21f851179ff08a0ceed51d8577cc34529153f804 100644 (file)
@@ -19,6 +19,7 @@ config X86_32
        select KMAP_LOCAL
        select MODULES_USE_ELF_REL
        select OLD_SIGACTION
+       select ARCH_SPLIT_ARG64
 
 config X86_64
        def_bool y
index e04d90af4c27cd4966351d277e945ff4df47461b..6375967a8244dc4e6777d8ff95de1be33ea77e06 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/hyperv-tlfs.h>
 #include <asm/mshyperv.h>
 #include <asm/idtentry.h>
+#include <linux/kexec.h>
 #include <linux/version.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
@@ -26,6 +27,8 @@
 #include <linux/syscore_ops.h>
 #include <clocksource/hyperv_timer.h>
 
+int hyperv_init_cpuhp;
+
 void *hv_hypercall_pg;
 EXPORT_SYMBOL_GPL(hv_hypercall_pg);
 
@@ -312,6 +315,25 @@ static struct syscore_ops hv_syscore_ops = {
        .resume         = hv_resume,
 };
 
+static void (* __initdata old_setup_percpu_clockev)(void);
+
+static void __init hv_stimer_setup_percpu_clockev(void)
+{
+       /*
+        * Ignore any errors in setting up stimer clockevents
+        * as we can run with the LAPIC timer as a fallback.
+        */
+       (void)hv_stimer_alloc();
+
+       /*
+        * Still register the LAPIC timer, because the direct-mode STIMER is
+        * not supported by old versions of Hyper-V. This also allows users
+        * to switch to the LAPIC timer via /sys, if they want to.
+        */
+       if (old_setup_percpu_clockev)
+               old_setup_percpu_clockev();
+}
+
 /*
  * This function is to be invoked early in the boot sequence after the
  * hypervisor has been detected.
@@ -390,10 +412,14 @@ void __init hyperv_init(void)
        wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 
        /*
-        * Ignore any errors in setting up stimer clockevents
-        * as we can run with the LAPIC timer as a fallback.
+        * hyperv_init() is called before LAPIC is initialized: see
+        * apic_intr_mode_init() -> x86_platform.apic_post_init() and
+        * apic_bsp_setup() -> setup_local_APIC(). The direct-mode STIMER
+        * depends on LAPIC, so hv_stimer_alloc() should be called from
+        * x86_init.timers.setup_percpu_clockev.
         */
-       (void)hv_stimer_alloc();
+       old_setup_percpu_clockev = x86_init.timers.setup_percpu_clockev;
+       x86_init.timers.setup_percpu_clockev = hv_stimer_setup_percpu_clockev;
 
        hv_apic_init();
 
@@ -401,6 +427,7 @@ void __init hyperv_init(void)
 
        register_syscore_ops(&hv_syscore_ops);
 
+       hyperv_init_cpuhp = cpuhp;
        return;
 
 remove_cpuhp_state:
index 5208ba49c89a96144ba52d82cabc21ad39a743e8..2c87350c1fb095149a06709cbc0b11f2427f7acc 100644 (file)
@@ -66,11 +66,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
        if (!hv_hypercall_pg)
                goto do_native;
 
-       if (cpumask_empty(cpus))
-               return;
-
        local_irq_save(flags);
 
+       /*
+        * Only check the mask _after_ interrupts have been disabled to avoid
+        * the mask changing under our feet.
+        */
+       if (cpumask_empty(cpus)) {
+               local_irq_restore(flags);
+               return;
+       }
+
        flush_pcpu = (struct hv_tlb_flush **)
                     this_cpu_ptr(hyperv_pcpu_input_arg);
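
Testing the cpumask before local_irq_save() was a time-of-check-to-time-of-use race: an interrupt delivered between the emptiness test and the hypercall can change the mask. The fix must also remember that the early return now runs with interrupts off. The pattern reduced to its skeleton (flush_others() hypothetical, kernel-style):

static void flush_others(const struct cpumask *cpus)
{
        unsigned long flags;

        local_irq_save(flags);

        /* Re-test under irq-off: the mask can no longer change beneath us. */
        if (cpumask_empty(cpus)) {
                local_irq_restore(flags);       /* don't leak the irq-off state */
                return;
        }

        /* ... build and issue the flush hypercall ... */

        local_irq_restore(flags);
}
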
 
index 3ab7b46087b74c8c112a1afcd95853bb9cb1ecdf..3d6616f6f6ef87a8ed713d271a0c14dec96f8ed4 100644 (file)
@@ -1010,9 +1010,21 @@ struct kvm_arch {
         */
        bool tdp_mmu_enabled;
 
-       /* List of struct tdp_mmu_pages being used as roots */
+       /*
+        * List of struct kvm_mmu_page being used as roots.
+        * All struct kvm_mmu_pages in the list should have
+        * tdp_mmu_page set.
+        * All struct kvm_mmu_pages in the list should have a positive
+        * root_count except when a thread holds the MMU lock and is removing
+        * an entry from the list.
+        */
        struct list_head tdp_mmu_roots;
-       /* List of struct tdp_mmu_pages not being used as roots */
+
+       /*
+        * List of struct kvm_mmu_page not being used as roots.
+        * All struct kvm_mmu_pages in the list should have
+        * tdp_mmu_page set and a root_count of 0.
+        */
        struct list_head tdp_mmu_pages;
 };
 
@@ -1287,6 +1299,8 @@ struct kvm_x86_ops {
        void (*migrate_timers)(struct kvm_vcpu *vcpu);
        void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
        int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
+
+       void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
 };
 
 struct kvm_x86_nested_ops {
@@ -1468,6 +1482,7 @@ int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
 int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
 int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
+int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu);
 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
 
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
diff --git a/arch/x86/include/asm/local64.h b/arch/x86/include/asm/local64.h
deleted file mode 100644 (file)
index 36c93b5..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
index ffc289992d1b0f5bd81eeeed98feae67d71dfb22..30f76b96685799c7e95cfff039d265a1dc42dea0 100644 (file)
@@ -74,6 +74,8 @@ static inline void hv_disable_stimer0_percpu_irq(int irq) {}
 
 
 #if IS_ENABLED(CONFIG_HYPERV)
+extern int hyperv_init_cpuhp;
+
 extern void *hv_hypercall_pg;
 extern void  __percpu  **hyperv_pcpu_input_arg;
 
index f628e3dc150f38088dafb158857335f2f83866bf..43b54bef5448fa360c9ff21444b62fc678e80fd7 100644 (file)
@@ -135,14 +135,32 @@ static void hv_machine_shutdown(void)
 {
        if (kexec_in_progress && hv_kexec_handler)
                hv_kexec_handler();
+
+       /*
+        * Call hv_cpu_die() on all the CPUs; otherwise the hypervisor may
+        * later corrupt the old VP Assist Pages and crash the kexec kernel.
+        */
+       if (kexec_in_progress && hyperv_init_cpuhp > 0)
+               cpuhp_remove_state(hyperv_init_cpuhp);
+
+       /* The function calls stop_other_cpus(). */
        native_machine_shutdown();
+
+       /* Disable the hypercall page when there is only 1 active CPU. */
+       if (kexec_in_progress)
+               hyperv_cleanup();
 }
 
 static void hv_machine_crash_shutdown(struct pt_regs *regs)
 {
        if (hv_crash_handler)
                hv_crash_handler(regs);
+
+       /* The function calls crash_smp_send_stop(). */
        native_machine_crash_shutdown(regs);
+
+       /* Disable the hypercall page when there is only 1 active CPU. */
+       hyperv_cleanup();
 }
 #endif /* CONFIG_KEXEC_CORE */
 #endif /* CONFIG_HYPERV */
index 23ad8e953dfb1502a0dea9bdbc02a507c378bf8a..a29997e6cf9e6c16f31f0e225c1da094de6db5d4 100644 (file)
@@ -167,9 +167,6 @@ static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
        *repeat = 0;
        *uniform = 1;
 
-       /* Make end inclusive instead of exclusive */
-       end--;
-
        prev_match = MTRR_TYPE_INVALID;
        for (i = 0; i < num_var_ranges; ++i) {
                unsigned short start_state, end_state, inclusive;
@@ -261,6 +258,9 @@ u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
        int repeat;
        u64 partial_end;
 
+       /* Make end inclusive instead of exclusive */
+       end--;
+
        if (!mtrr_state_set)
                return MTRR_TYPE_INVALID;
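
mtrr_type_lookup_variable() can be re-invoked through the *repeat protocol when a range spans MTRRs of different types, and converting the exclusive end to an inclusive one inside the helper shaved a byte off the range on every repeat. Hoisting the end-- into mtrr_type_lookup() performs the conversion exactly once. The failure mode in miniature:

#include <assert.h>
#include <stdint.h>

/* Buggy shape: the helper converts, and repeat calls re-convert. */
static uint64_t to_inclusive(uint64_t end) { return end - 1; }

int main(void)
{
        uint64_t end = 0x2000;          /* exclusive end of the lookup */

        end = to_inclusive(end);        /* first pass:  0x1fff, correct   */
        end = to_inclusive(end);        /* repeat pass: 0x1ffe, byte lost */
        assert(end == 0x1ffe);

        /* Fixed shape: the caller converts once; repeats reuse the value. */
        uint64_t once = 0x2000 - 1;
        assert(once == 0x1fff);
        return 0;
}
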
 
index 29ffb95b25fff34e534ebd263e36e41f206be868..460f3e0df106c517be54202ed10bcb0b869a7412 100644 (file)
@@ -525,89 +525,70 @@ static void rdtgroup_remove(struct rdtgroup *rdtgrp)
        kfree(rdtgrp);
 }
 
-struct task_move_callback {
-       struct callback_head    work;
-       struct rdtgroup         *rdtgrp;
-};
-
-static void move_myself(struct callback_head *head)
+static void _update_task_closid_rmid(void *task)
 {
-       struct task_move_callback *callback;
-       struct rdtgroup *rdtgrp;
-
-       callback = container_of(head, struct task_move_callback, work);
-       rdtgrp = callback->rdtgrp;
-
        /*
-        * If resource group was deleted before this task work callback
-        * was invoked, then assign the task to root group and free the
-        * resource group.
+        * If the task is still current on this CPU, update PQR_ASSOC MSR.
+        * Otherwise, the MSR is updated when the task is scheduled in.
         */
-       if (atomic_dec_and_test(&rdtgrp->waitcount) &&
-           (rdtgrp->flags & RDT_DELETED)) {
-               current->closid = 0;
-               current->rmid = 0;
-               rdtgroup_remove(rdtgrp);
-       }
-
-       if (unlikely(current->flags & PF_EXITING))
-               goto out;
-
-       preempt_disable();
-       /* update PQR_ASSOC MSR to make resource group go into effect */
-       resctrl_sched_in();
-       preempt_enable();
+       if (task == current)
+               resctrl_sched_in();
+}
 
-out:
-       kfree(callback);
+static void update_task_closid_rmid(struct task_struct *t)
+{
+       if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
+               smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
+       else
+               _update_task_closid_rmid(t);
 }
 
 static int __rdtgroup_move_task(struct task_struct *tsk,
                                struct rdtgroup *rdtgrp)
 {
-       struct task_move_callback *callback;
-       int ret;
-
-       callback = kzalloc(sizeof(*callback), GFP_KERNEL);
-       if (!callback)
-               return -ENOMEM;
-       callback->work.func = move_myself;
-       callback->rdtgrp = rdtgrp;
+       /* If the task is already in rdtgrp, no need to move the task. */
+       if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid &&
+            tsk->rmid == rdtgrp->mon.rmid) ||
+           (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid &&
+            tsk->closid == rdtgrp->mon.parent->closid))
+               return 0;
 
        /*
-        * Take a refcount, so rdtgrp cannot be freed before the
-        * callback has been invoked.
+        * Set the task's closid/rmid before the PQR_ASSOC MSR is updated
+        * from them.
+        *
+        * For ctrl_mon groups, move both closid and rmid.
+        * For monitor groups, tasks can be moved only from
+        * their parent CTRL group.
         */
-       atomic_inc(&rdtgrp->waitcount);
-       ret = task_work_add(tsk, &callback->work, TWA_RESUME);
-       if (ret) {
-               /*
-                * Task is exiting. Drop the refcount and free the callback.
-                * No need to check the refcount as the group cannot be
-                * deleted before the write function unlocks rdtgroup_mutex.
-                */
-               atomic_dec(&rdtgrp->waitcount);
-               kfree(callback);
-               rdt_last_cmd_puts("Task exited\n");
-       } else {
-               /*
-                * For ctrl_mon groups move both closid and rmid.
-                * For monitor groups, can move the tasks only from
-                * their parent CTRL group.
-                */
-               if (rdtgrp->type == RDTCTRL_GROUP) {
-                       tsk->closid = rdtgrp->closid;
+
+       if (rdtgrp->type == RDTCTRL_GROUP) {
+               tsk->closid = rdtgrp->closid;
+               tsk->rmid = rdtgrp->mon.rmid;
+       } else if (rdtgrp->type == RDTMON_GROUP) {
+               if (rdtgrp->mon.parent->closid == tsk->closid) {
                        tsk->rmid = rdtgrp->mon.rmid;
-               } else if (rdtgrp->type == RDTMON_GROUP) {
-                       if (rdtgrp->mon.parent->closid == tsk->closid) {
-                               tsk->rmid = rdtgrp->mon.rmid;
-                       } else {
-                               rdt_last_cmd_puts("Can't move task to different control group\n");
-                               ret = -EINVAL;
-                       }
+               } else {
+                       rdt_last_cmd_puts("Can't move task to different control group\n");
+                       return -EINVAL;
                }
        }
-       return ret;
+
+       /*
+        * Ensure the task's closid and rmid are written before determining
+        * whether the task is current, which decides whether it must be interrupted.
+        */
+       barrier();
+
+       /*
+        * By now, the task's closid and rmid are set. If the task is current
+        * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
+        * group go into effect. If the task is not current, the MSR will be
+        * updated when the task is scheduled in.
+        */
+       update_task_closid_rmid(tsk);
+
+       return 0;
 }
 
 static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
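
Taken together, the replacement for the task_work scheme is: store the task's closid/rmid, order those stores before the task_curr() check with barrier(), then IPI the task's CPU so resctrl_sched_in() reloads PQR_ASSOC from the values just written; a task that is not running picks them up at its next context switch. Condensed into a sketch of the pattern (not a drop-in for the code above):

static void _refresh_pqr_assoc(void *task)
{
        /* Runs on the target CPU: reload only if the task is still there;
         * otherwise the context-switch path will do it. */
        if (task == current)
                resctrl_sched_in();
}

static void move_task(struct task_struct *t, u32 closid, u32 rmid)
{
        t->closid = closid;
        t->rmid = rmid;

        /* Publish the new ids before deciding whether to interrupt t. */
        barrier();

        if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
                smp_call_function_single(task_cpu(t), _refresh_pqr_assoc, t, 1);
        else
                _refresh_pqr_assoc(t);
}
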
index 7d04b356d44d3362b9d59c561f4c680514a56a2b..cdc04d09124233d0c90c3def9a2dc6fcaadaade9 100644 (file)
@@ -305,14 +305,14 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
        case 0xe4:
        case 0xe5:
                *exitinfo |= IOIO_TYPE_IN;
-               *exitinfo |= (u64)insn->immediate.value << 16;
+               *exitinfo |= (u8)insn->immediate.value << 16;
                break;
 
        /* OUT immediate opcodes */
        case 0xe6:
        case 0xe7:
                *exitinfo |= IOIO_TYPE_OUT;
-               *exitinfo |= (u64)insn->immediate.value << 16;
+               *exitinfo |= (u8)insn->immediate.value << 16;
                break;
 
        /* IN register opcodes */
index 3136e05831cf35f9ac852c5b3dc6d2bd8dac4a4c..43cceadd073edca4172d3cc4137dcbe271906a81 100644 (file)
@@ -674,7 +674,7 @@ static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
                           (unsigned long long)vcpu->arch.pv_eoi.msr_val);
                return false;
        }
-       return val & 0x1;
+       return val & KVM_PV_EOI_ENABLED;
 }
 
 static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
@@ -2898,7 +2898,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
                        /* evaluate pending_events before reading the vector */
                        smp_rmb();
                        sipi_vector = apic->sipi_vector;
-                       kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
+                       kvm_x86_ops.vcpu_deliver_sipi_vector(vcpu, sipi_vector);
                        vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                }
        }
index 9c4a9c8e43d90e23b543a905cc77df0ca6f59246..581925e476d6c57ccc7af45f61761b55e8686c8e 100644 (file)
@@ -49,7 +49,7 @@ static inline u64 rsvd_bits(int s, int e)
        if (e < s)
                return 0;
 
-       return ((1ULL << (e - s + 1)) - 1) << s;
+       return ((2ULL << (e - s)) - 1) << s;
 }
 
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);
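
The innocuous-looking rewrite dodges undefined behaviour: for s == 0, e == 63 the old form computed 1ULL << 64, a shift by the full width of the type, which C leaves undefined. 2ULL << (e - s) never shifts by more than 63; in the all-bits case it wraps to 0 (unsigned overflow is well-defined) and 0 - 1 yields the intended all-ones mask. Verifiable in isolation:

#include <assert.h>
#include <stdint.h>

static uint64_t rsvd_bits(int s, int e)
{
        if (e < s)
                return 0;
        /* Max shift is 63; 2ULL << 63 wraps to 0, and 0 - 1 is all ones. */
        return ((2ULL << (e - s)) - 1) << s;
}

int main(void)
{
        assert(rsvd_bits(0, 63) == UINT64_MAX);         /* old form: UB here */
        assert(rsvd_bits(52, 62) == (0x7ffULL << 52));  /* typical range     */
        return 0;
}
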
index c478904af5189819e8daa217564240ac3aa33a7a..6d16481aa29de023fc057c1112c9c2086d139f22 100644 (file)
@@ -3493,26 +3493,25 @@ static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
  * Return the level of the lowest level SPTE added to sptes.
  * That SPTE may be non-present.
  */
-static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
+static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
 {
        struct kvm_shadow_walk_iterator iterator;
-       int leaf = vcpu->arch.mmu->root_level;
+       int leaf = -1;
        u64 spte;
 
-
        walk_shadow_page_lockless_begin(vcpu);
 
-       for (shadow_walk_init(&iterator, vcpu, addr);
+       for (shadow_walk_init(&iterator, vcpu, addr),
+            *root_level = iterator.level;
             shadow_walk_okay(&iterator);
             __shadow_walk_next(&iterator, spte)) {
                leaf = iterator.level;
                spte = mmu_spte_get_lockless(iterator.sptep);
 
-               sptes[leaf - 1] = spte;
+               sptes[leaf] = spte;
 
                if (!is_shadow_present_pte(spte))
                        break;
-
        }
 
        walk_shadow_page_lockless_end(vcpu);
@@ -3520,14 +3519,12 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
        return leaf;
 }
 
-/* return true if reserved bit is detected on spte. */
+/* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 {
-       u64 sptes[PT64_ROOT_MAX_LEVEL];
+       u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
        struct rsvd_bits_validate *rsvd_check;
-       int root = vcpu->arch.mmu->shadow_root_level;
-       int leaf;
-       int level;
+       int root, leaf, level;
        bool reserved = false;
 
        if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) {
@@ -3536,35 +3533,45 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
        }
 
        if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
-               leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes);
+               leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
        else
-               leaf = get_walk(vcpu, addr, sptes);
+               leaf = get_walk(vcpu, addr, sptes, &root);
+
+       if (unlikely(leaf < 0)) {
+               *sptep = 0ull;
+               return reserved;
+       }
+
+       *sptep = sptes[leaf];
+
+       /*
+        * Skip reserved bits checks on the terminal leaf if it's not a valid
+        * SPTE.  Note, this also (intentionally) skips MMIO SPTEs, which, by
+        * design, always have reserved bits set.  The purpose of the checks is
+        * to detect reserved bits on non-MMIO SPTEs, i.e. buggy SPTEs.
+        */
+       if (!is_shadow_present_pte(sptes[leaf]))
+               leaf++;
 
        rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
 
-       for (level = root; level >= leaf; level--) {
-               if (!is_shadow_present_pte(sptes[level - 1]))
-                       break;
+       for (level = root; level >= leaf; level--)
                /*
                 * Use a bitwise-OR instead of a logical-OR to aggregate the
                 * reserved bit and EPT's invalid memtype/XWR checks to avoid
                 * adding a Jcc in the loop.
                 */
-               reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level - 1]) |
-                           __is_rsvd_bits_set(rsvd_check, sptes[level - 1],
-                                              level);
-       }
+               reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level]) |
+                           __is_rsvd_bits_set(rsvd_check, sptes[level], level);
 
        if (reserved) {
                pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
                       __func__, addr);
                for (level = root; level >= leaf; level--)
                        pr_err("------ spte 0x%llx level %d.\n",
-                              sptes[level - 1], level);
+                              sptes[level], level);
        }
 
-       *sptep = sptes[leaf - 1];
-
        return reserved;
 }
 
index 4bd2f1dc0172c98e9beb9657de8b7321127064b2..2ef8615f9dba87bca72d4912df6e4cd9394def56 100644 (file)
@@ -44,7 +44,48 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
        WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
 }
 
-#define for_each_tdp_mmu_root(_kvm, _root)                         \
+static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
+{
+       if (kvm_mmu_put_root(kvm, root))
+               kvm_tdp_mmu_free_root(kvm, root);
+}
+
+static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
+                                          struct kvm_mmu_page *root)
+{
+       lockdep_assert_held(&kvm->mmu_lock);
+
+       if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
+               return false;
+
+       kvm_mmu_get_root(kvm, root);
+       return true;
+
+}
+
+static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
+                                                    struct kvm_mmu_page *root)
+{
+       struct kvm_mmu_page *next_root;
+
+       next_root = list_next_entry(root, link);
+       tdp_mmu_put_root(kvm, root);
+       return next_root;
+}
+
+/*
+ * Note: this iterator gets and puts references to the roots it iterates over.
+ * This makes it safe to release the MMU lock and yield within the loop, but
+ * if exiting the loop early, the caller must drop the reference to the most
+ * recent root. (Unless keeping a live reference is desirable.)
+ */
+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)                          \
+       for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots,        \
+                                     typeof(*_root), link);            \
+            tdp_mmu_next_root_valid(_kvm, _root);                      \
+            _root = tdp_mmu_next_root(_kvm, _root))
+
+#define for_each_tdp_mmu_root(_kvm, _root)                             \
        list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
 
 bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
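
The iterator folds reference counting into the loop control: the validity test pins the root it is about to hand to the body, and the advance step unpins the one just processed, so the body may drop the MMU lock and yield without its root being freed underneath it. A generic sketch of the shape over a hypothetical refcounted list (pinning the next element before releasing the current one):

struct node {
        struct list_head link;
        refcount_t ref;
};

static void node_put(struct node *n)
{
        if (refcount_dec_and_test(&n->ref))
                kfree(n);                       /* n may really vanish here */
}

static struct node *next_pinned(struct list_head *head, struct node *cur)
{
        struct node *next = list_next_entry(cur, link);

        if (!list_entry_is_head(next, head, link))
                refcount_inc(&next->ref);       /* pin next... */
        node_put(cur);                          /* ...then release cur */
        return next;
}
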
@@ -447,18 +488,9 @@ bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
        struct kvm_mmu_page *root;
        bool flush = false;
 
-       for_each_tdp_mmu_root(kvm, root) {
-               /*
-                * Take a reference on the root so that it cannot be freed if
-                * this thread releases the MMU lock and yields in this loop.
-                */
-               kvm_mmu_get_root(kvm, root);
-
+       for_each_tdp_mmu_root_yield_safe(kvm, root)
                flush |= zap_gfn_range(kvm, root, start, end, true);
 
-               kvm_mmu_put_root(kvm, root);
-       }
-
        return flush;
 }
 
@@ -619,13 +651,7 @@ static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
        int ret = 0;
        int as_id;
 
-       for_each_tdp_mmu_root(kvm, root) {
-               /*
-                * Take a reference on the root so that it cannot be freed if
-                * this thread releases the MMU lock and yields in this loop.
-                */
-               kvm_mmu_get_root(kvm, root);
-
+       for_each_tdp_mmu_root_yield_safe(kvm, root) {
                as_id = kvm_mmu_page_as_id(root);
                slots = __kvm_memslots(kvm, as_id);
                kvm_for_each_memslot(memslot, slots) {
@@ -647,8 +673,6 @@ static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
                        ret |= handler(kvm, memslot, root, gfn_start,
                                       gfn_end, data);
                }
-
-               kvm_mmu_put_root(kvm, root);
        }
 
        return ret;
@@ -838,21 +862,13 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
        int root_as_id;
        bool spte_set = false;
 
-       for_each_tdp_mmu_root(kvm, root) {
+       for_each_tdp_mmu_root_yield_safe(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;
 
-               /*
-                * Take a reference on the root so that it cannot be freed if
-                * this thread releases the MMU lock and yields in this loop.
-                */
-               kvm_mmu_get_root(kvm, root);
-
                spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
                             slot->base_gfn + slot->npages, min_level);
-
-               kvm_mmu_put_root(kvm, root);
        }
 
        return spte_set;
@@ -906,21 +922,13 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
        int root_as_id;
        bool spte_set = false;
 
-       for_each_tdp_mmu_root(kvm, root) {
+       for_each_tdp_mmu_root_yield_safe(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;
 
-               /*
-                * Take a reference on the root so that it cannot be freed if
-                * this thread releases the MMU lock and yields in this loop.
-                */
-               kvm_mmu_get_root(kvm, root);
-
                spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
                                slot->base_gfn + slot->npages);
-
-               kvm_mmu_put_root(kvm, root);
        }
 
        return spte_set;
@@ -1029,21 +1037,13 @@ bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
        int root_as_id;
        bool spte_set = false;
 
-       for_each_tdp_mmu_root(kvm, root) {
+       for_each_tdp_mmu_root_yield_safe(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;
 
-               /*
-                * Take a reference on the root so that it cannot be freed if
-                * this thread releases the MMU lock and yields in this loop.
-                */
-               kvm_mmu_get_root(kvm, root);
-
                spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,
                                slot->base_gfn + slot->npages);
-
-               kvm_mmu_put_root(kvm, root);
        }
        return spte_set;
 }
@@ -1089,21 +1089,13 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
        struct kvm_mmu_page *root;
        int root_as_id;
 
-       for_each_tdp_mmu_root(kvm, root) {
+       for_each_tdp_mmu_root_yield_safe(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;
 
-               /*
-                * Take a reference on the root so that it cannot be freed if
-                * this thread releases the MMU lock and yields in this loop.
-                */
-               kvm_mmu_get_root(kvm, root);
-
                zap_collapsible_spte_range(kvm, root, slot->base_gfn,
                                           slot->base_gfn + slot->npages);
-
-               kvm_mmu_put_root(kvm, root);
        }
 }
 
@@ -1160,16 +1152,19 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
  * Return the level of the lowest level SPTE added to sptes.
  * That SPTE may be non-present.
  */
-int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
+int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
+                        int *root_level)
 {
        struct tdp_iter iter;
        struct kvm_mmu *mmu = vcpu->arch.mmu;
-       int leaf = vcpu->arch.mmu->shadow_root_level;
        gfn_t gfn = addr >> PAGE_SHIFT;
+       int leaf = -1;
+
+       *root_level = vcpu->arch.mmu->shadow_root_level;
 
        tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
                leaf = iter.level;
-               sptes[leaf - 1] = iter.old_spte;
+               sptes[leaf] = iter.old_spte;
        }
 
        return leaf;
index 556e065503f69b25e00a2e582f2e3418d11b16d5..cbbdbadd1526ffc1cd7e79a7cc959facd7cfd8c1 100644 (file)
@@ -44,5 +44,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
                                   struct kvm_memory_slot *slot, gfn_t gfn);
 
-int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes);
+int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
+                        int *root_level);
+
 #endif /* __KVM_X86_MMU_TDP_MMU_H */
index b0b667456b2e7edba622291ad2d693ba1aa49add..cb4c6ee10029c96d97460250315516468d187d83 100644 (file)
@@ -199,6 +199,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
+
        if (!nested_svm_vmrun_msrpm(svm)) {
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror =
@@ -595,6 +596,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
        svm->nested.vmcb12_gpa = 0;
        WARN_ON_ONCE(svm->nested.nested_run_pending);
 
+       kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
+
        /* in case we halted in L2 */
        svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
@@ -754,6 +757,7 @@ void svm_leave_nested(struct vcpu_svm *svm)
                leave_guest_mode(&svm->vcpu);
                copy_vmcb_control_area(&vmcb->control, &hsave->control);
                nested_svm_uninit_mmu_context(&svm->vcpu);
+               vmcb_mark_all_dirty(svm->vmcb);
        }
 
        kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
@@ -1194,6 +1198,10 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
         * in the registers, the save area of the nested state instead
         * contains saved L1 state.
         */
+
+       svm->nested.nested_run_pending =
+               !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
+
        copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
        hsave->save = *save;
 
index 9858d5ae9dddde0dae70bc8359526eecdc375ecd..c8ffdbc81709ee75f88824b26c1dda9d7d165dc6 100644 (file)
@@ -1563,6 +1563,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
                        goto vmgexit_err;
                break;
        case SVM_VMGEXIT_NMI_COMPLETE:
+       case SVM_VMGEXIT_AP_HLT_LOOP:
        case SVM_VMGEXIT_AP_JUMP_TABLE:
        case SVM_VMGEXIT_UNSUPPORTED_EVENT:
                break;
@@ -1888,6 +1889,9 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
        case SVM_VMGEXIT_NMI_COMPLETE:
                ret = svm_invoke_exit_handler(svm, SVM_EXIT_IRET);
                break;
+       case SVM_VMGEXIT_AP_HLT_LOOP:
+               ret = kvm_emulate_ap_reset_hold(&svm->vcpu);
+               break;
        case SVM_VMGEXIT_AP_JUMP_TABLE: {
                struct kvm_sev_info *sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
 
@@ -2001,7 +2005,7 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
         * of which one step is to perform a VMLOAD. Since hardware does not
         * perform a VMSAVE on VMRUN, the host savearea must be updated.
         */
-       asm volatile(__ex("vmsave") : : "a" (__sme_page_pa(sd->save_area)) : "memory");
+       asm volatile(__ex("vmsave %0") : : "a" (__sme_page_pa(sd->save_area)) : "memory");
 
        /*
         * Certain MSRs are restored on VMEXIT, only save ones that aren't
@@ -2040,3 +2044,21 @@ void sev_es_vcpu_put(struct vcpu_svm *svm)
                wrmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
        }
 }
+
+void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       /* First SIPI: Use the values as initially set by the VMM */
+       if (!svm->received_first_sipi) {
+               svm->received_first_sipi = true;
+               return;
+       }
+
+       /*
+        * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
+        * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
+        * non-zero value.
+        */
+       ghcb_set_sw_exit_info_2(svm->ghcb, 1);
+}
index cce0143a6f8015a61a575dc66ec3a3ce3a7cc28a..7ef171790d02b669ffaa677cf0519972774ea384 100644 (file)
@@ -3677,8 +3677,6 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
        return EXIT_FASTPATH_NONE;
 }
 
-void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
-
 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
                                        struct vcpu_svm *svm)
 {
@@ -4384,6 +4382,14 @@ static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
                   (vmcb_is_intercept(&svm->vmcb->control, INTERCEPT_INIT));
 }
 
+static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
+{
+       if (!sev_es_guest(vcpu->kvm))
+               return kvm_vcpu_deliver_sipi_vector(vcpu, vector);
+
+       sev_vcpu_deliver_sipi_vector(vcpu, vector);
+}
+
 static void svm_vm_destroy(struct kvm *kvm)
 {
        avic_vm_destroy(kvm);
@@ -4526,6 +4532,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 
        .msr_filter_changed = svm_msr_filter_changed,
        .complete_emulated_msr = svm_complete_emulated_msr,
+
+       .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
 };
 
 static struct kvm_x86_init_ops svm_init_ops __initdata = {
index 5431e6335e2e8d3ba69cae3a6f71e7ecaafdd32a..0fe874ae54982e146ee4ffda756ac80470d73bcb 100644 (file)
@@ -185,6 +185,7 @@ struct vcpu_svm {
        struct vmcb_save_area *vmsa;
        struct ghcb *ghcb;
        struct kvm_host_map ghcb_map;
+       bool received_first_sipi;
 
        /* SEV-ES scratch area support */
        void *ghcb_sa;
@@ -591,6 +592,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm);
 void sev_es_create_vcpu(struct vcpu_svm *svm);
 void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu);
 void sev_es_vcpu_put(struct vcpu_svm *svm);
+void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
 
 /* vmenter.S */
 
index e2f26564a12de62a0405e0bf104610a581ccca91..0fbb46990dfce110aa23e62a84c343b4c408eabc 100644 (file)
@@ -4442,6 +4442,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
        /* trying to cancel vmlaunch/vmresume is a bug */
        WARN_ON_ONCE(vmx->nested.nested_run_pending);
 
+       kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+
        /* Service the TLB flush request for L2 before switching to L1. */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
                kvm_vcpu_flush_tlb_current(vcpu);
index 75c9c6a0a3a454825541b0d2a290653c295ab981..2af05d3b0590985ce22ee26e6a83b0848b6043ef 100644 (file)
@@ -7707,6 +7707,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .msr_filter_changed = vmx_msr_filter_changed,
        .complete_emulated_msr = kvm_complete_insn_gp,
        .cpu_dirty_log_size = vmx_cpu_dirty_log_size,
+
+       .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
 };
 
 static __init int hardware_setup(void)
index 3f7c1fc7a3ce5ce9990dacbe2ef1cede75927a31..9a8969a6dd06362a84ad6989f34d48b51e59ae24 100644 (file)
@@ -7976,17 +7976,22 @@ void kvm_arch_exit(void)
        kmem_cache_destroy(x86_fpu_cache);
 }
 
-int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
+static int __kvm_vcpu_halt(struct kvm_vcpu *vcpu, int state, int reason)
 {
        ++vcpu->stat.halt_exits;
        if (lapic_in_kernel(vcpu)) {
-               vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
+               vcpu->arch.mp_state = state;
                return 1;
        } else {
-               vcpu->run->exit_reason = KVM_EXIT_HLT;
+               vcpu->run->exit_reason = reason;
                return 0;
        }
 }
+
+int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
+{
+       return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
+}
 EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
 
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
@@ -8000,6 +8005,14 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
+int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
+{
+       int ret = kvm_skip_emulated_instruction(vcpu);
+
+       return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, KVM_EXIT_AP_RESET_HOLD) && ret;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
+
 #ifdef CONFIG_X86_64
 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
                                unsigned long clock_type)
@@ -8789,7 +8802,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        if (kvm_request_pending(vcpu)) {
                if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
-                       if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
+                       if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
+                               ;
+                       else if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
                                r = 0;
                                goto out;
                        }
@@ -9094,6 +9109,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
        kvm_apic_accept_events(vcpu);
        switch(vcpu->arch.mp_state) {
        case KVM_MP_STATE_HALTED:
+       case KVM_MP_STATE_AP_RESET_HOLD:
                vcpu->arch.pv.pv_unhalted = false;
                vcpu->arch.mp_state =
                        KVM_MP_STATE_RUNNABLE;
@@ -9520,8 +9536,9 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                kvm_load_guest_fpu(vcpu);
 
        kvm_apic_accept_events(vcpu);
-       if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
-                                       vcpu->arch.pv.pv_unhalted)
+       if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED ||
+            vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) &&
+           vcpu->arch.pv.pv_unhalted)
                mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
        else
                mp_state->mp_state = vcpu->arch.mp_state;
@@ -10152,6 +10169,7 @@ void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
        kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
        kvm_rip_write(vcpu, 0);
 }
+EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector);
 
 int kvm_arch_hardware_enable(void)
 {
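For reference, the SIPI semantics behind kvm_vcpu_deliver_sipi_vector(), now exported so the SEV-ES side (see the svm.h hunk above) can reuse it: a start-up IPI with vector V points the AP at physical address V << 12 in real mode, with CS.selector = V << 8 and IP = 0. A minimal standalone sketch of that math (the vector value is an assumed example, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t vector   = 0x9a;                    /* assumed example vector */
            uint32_t cs_sel  = (uint32_t)vector << 8;   /* CS.selector = 0x9a00 */
            uint32_t cs_base = (uint32_t)vector << 12;  /* CS.base = 0x9a000 */

            /* the AP resumes execution at CS.base + IP, with IP = 0 */
            printf("selector=%#x base=%#x entry=%#x\n", cs_sel, cs_base, cs_base);
            return 0;
    }
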
index dfd82f51ba66bc1a048c4e81f4c4dbbd5eb5f09d..f6a9e2e3664259e56844c54385e3f97409900b07 100644 (file)
@@ -829,6 +829,8 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
        }
 
        free_page((unsigned long)pmd_sv);
+
+       pgtable_pmd_page_dtor(virt_to_page(pmd));
        free_page((unsigned long)pmd);
 
        return 1;
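The one-liner above matters because pgtable_pmd_page_ctor() and pgtable_pmd_page_dtor() must pair up: the ctor run at allocation time initializes the page-table page's split ptlock and accounting, and freeing the page without the dtor leaks that state. A kernel-context sketch of the pairing (the two ctor/dtor helpers are the real kernel API; the surrounding functions are illustrative):

    pmd_t *pmd_alloc_one_sketch(void)
    {
            struct page *page = alloc_page(GFP_KERNEL);

            if (!page)
                    return NULL;
            if (!pgtable_pmd_page_ctor(page)) {     /* ptlock alloc can fail */
                    __free_page(page);
                    return NULL;
            }
            return (pmd_t *)page_address(page);
    }

    void pmd_free_one_sketch(pmd_t *pmd)
    {
            pgtable_pmd_page_dtor(virt_to_page(pmd));  /* undo the ctor */
            free_page((unsigned long)pmd);             /* then free the page */
    }
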
index 9e87ab010c82bbbe02798d17be75ba6171d199f6..e68ea5f4ad1ce0c3bfadafec4e01dc44fc2664e5 100644 (file)
@@ -164,10 +164,10 @@ static int xen_cpu_up_prepare_hvm(unsigned int cpu)
        else
                per_cpu(xen_vcpu_id, cpu) = cpu;
        rc = xen_vcpu_setup(cpu);
-       if (rc)
+       if (rc || !xen_have_vector_callback)
                return rc;
 
-       if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
+       if (xen_feature(XENFEAT_hvm_safe_pvclock))
                xen_setup_timer(cpu);
 
        rc = xen_smp_intr_init(cpu);
@@ -188,6 +188,8 @@ static int xen_cpu_dead_hvm(unsigned int cpu)
        return 0;
 }
 
+static bool no_vector_callback __initdata;
+
 static void __init xen_hvm_guest_init(void)
 {
        if (xen_pv_domain())
@@ -207,7 +209,7 @@ static void __init xen_hvm_guest_init(void)
 
        xen_panic_handler_init();
 
-       if (xen_feature(XENFEAT_hvm_callback_vector))
+       if (!no_vector_callback && xen_feature(XENFEAT_hvm_callback_vector))
                xen_have_vector_callback = 1;
 
        xen_hvm_smp_init();
@@ -233,6 +235,13 @@ static __init int xen_parse_nopv(char *arg)
 }
 early_param("xen_nopv", xen_parse_nopv);
 
+static __init int xen_parse_no_vector_callback(char *arg)
+{
+       no_vector_callback = true;
+       return 0;
+}
+early_param("xen_no_vector_callback", xen_parse_no_vector_callback);
+
 bool __init xen_hvm_need_lapic(void)
 {
        if (xen_pv_domain())
index f5e7db4f82abb63039ce49271ecbe87f857a1655..6ff3c887e0b99523cd69774b8de8f3009f69d92f 100644 (file)
@@ -33,9 +33,11 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
        int cpu;
 
        native_smp_prepare_cpus(max_cpus);
-       WARN_ON(xen_smp_intr_init(0));
 
-       xen_init_lock_cpu(0);
+       if (xen_have_vector_callback) {
+               WARN_ON(xen_smp_intr_init(0));
+               xen_init_lock_cpu(0);
+       }
 
        for_each_possible_cpu(cpu) {
                if (cpu == 0)
@@ -50,9 +52,11 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 static void xen_hvm_cpu_die(unsigned int cpu)
 {
        if (common_cpu_die(cpu) == 0) {
-               xen_smp_intr_free(cpu);
-               xen_uninit_lock_cpu(cpu);
-               xen_teardown_timer(cpu);
+               if (xen_have_vector_callback) {
+                       xen_smp_intr_free(cpu);
+                       xen_uninit_lock_cpu(cpu);
+                       xen_teardown_timer(cpu);
+               }
        }
 }
 #else
@@ -64,14 +68,19 @@ static void xen_hvm_cpu_die(unsigned int cpu)
 
 void __init xen_hvm_smp_init(void)
 {
-       if (!xen_have_vector_callback)
+       smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
+       smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
+       smp_ops.smp_cpus_done = xen_smp_cpus_done;
+       smp_ops.cpu_die = xen_hvm_cpu_die;
+
+       if (!xen_have_vector_callback) {
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+               nopvspin = true;
+#endif
                return;
+       }
 
-       smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
        smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
-       smp_ops.cpu_die = xen_hvm_cpu_die;
        smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
        smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
-       smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
-       smp_ops.smp_cpus_done = xen_smp_cpus_done;
 }
index 9718e9593564735da7130b4498777947241d804d..854c5e07e86703f4f8681cc51f8751fdcf917bd3 100644 (file)
@@ -2,7 +2,6 @@
 generated-y += syscall_table.h
 generic-y += extable.h
 generic-y += kvm_para.h
-generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += param.h
 generic-y += qrwlock.h
index 9e81d1052091fcbc9a71bdf6aa36bcfa50a221f7..9e4eb0fc1c16e7fb2ecce1091c499de6a0878912 100644 (file)
@@ -6332,13 +6332,13 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
         * limit 'something'.
         */
        /* no more than 50% of tags for async I/O */
-       bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
+       bfqd->word_depths[0][0] = max(bt->sb.depth >> 1, 1U);
        /*
         * no more than 75% of tags for sync writes (25% extra tags
         * w.r.t. async I/O, to prevent async I/O from starving sync
         * writes)
         */
-       bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
+       bfqd->word_depths[0][1] = max((bt->sb.depth * 3) >> 2, 1U);
 
        /*
         * In-word depths in case some bfq_queue is being weight-
@@ -6348,9 +6348,9 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
         * shortage.
         */
        /* no more than ~18% of tags for async I/O */
-       bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
+       bfqd->word_depths[1][0] = max((bt->sb.depth * 3) >> 4, 1U);
        /* no more than ~37% of tags for sync writes (~20% extra tags) */
-       bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
+       bfqd->word_depths[1][1] = max((bt->sb.depth * 6) >> 4, 1U);
 
        for (i = 0; i < 2; i++)
                for (j = 0; j < 2; j++)
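The fix replaces 1U << bt->sb.shift, which is only the sbitmap's per-word bit count, with bt->sb.depth, the map's real total depth. A worked example of the four limits, assuming a depth of 256 (standalone and illustrative only):

    #include <stdio.h>

    int main(void)
    {
            unsigned int d = 256;             /* assumed sbitmap depth */
            unsigned int limits[4] = {
                    d >> 1,                   /* async:               128 (~50%) */
                    (d * 3) >> 2,             /* sync writes:         192 (~75%) */
                    (d * 3) >> 4,             /* async, wr-raised:     48 (~18%) */
                    (d * 6) >> 4,             /* sync wr, wr-raised:   96 (~37%) */
            };

            for (int i = 0; i < 4; i++)       /* clamp to >= 1, like max(..., 1U) */
                    printf("word_depths: %u\n", limits[i] ? limits[i] : 1);
            return 0;
    }
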
index 96e5fcd7f071b606c62d946354080aada4f4b8b7..7663a9b94b8002ad710231ec726dfd4cc7c23513 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/blk-mq.h>
+#include <linux/blk-pm.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
@@ -424,11 +425,11 @@ EXPORT_SYMBOL(blk_cleanup_queue);
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
- * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
+ * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
  */
 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 {
-       const bool pm = flags & BLK_MQ_REQ_PREEMPT;
+       const bool pm = flags & BLK_MQ_REQ_PM;
 
        while (true) {
                bool success = false;
@@ -440,7 +441,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
                         * responsible for ensuring that that counter is
                         * globally visible before the queue is unfrozen.
                         */
-                       if (pm || !blk_queue_pm_only(q)) {
+                       if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
+                           !blk_queue_pm_only(q)) {
                                success = true;
                        } else {
                                percpu_ref_put(&q->q_usage_counter);
@@ -465,8 +467,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 
                wait_event(q->mq_freeze_wq,
                           (!q->mq_freeze_depth &&
-                           (pm || (blk_pm_request_resume(q),
-                                   !blk_queue_pm_only(q)))) ||
+                           blk_pm_resume_queue(pm, q)) ||
                           blk_queue_dying(q));
                if (blk_queue_dying(q))
                        return -ENODEV;
@@ -630,7 +631,7 @@ struct request *blk_get_request(struct request_queue *q, unsigned int op,
        struct request *req;
 
        WARN_ON_ONCE(op & REQ_NOWAIT);
-       WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));
+       WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));
 
        req = blk_mq_alloc_request(q, op, flags);
        if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
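With the rename, a request that must be processed during a power-management transition carries BLK_MQ_REQ_PM explicitly instead of the old PREEMPT flag. A hedged sketch of how a driver allocates one (blk_get_request() as in the hunk above; the surrounding error handling is illustrative):

    struct request *rq;

    rq = blk_get_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
    if (IS_ERR(rq))
            return PTR_ERR(rq);
    /* ... issue the PM command ... */
    blk_put_request(rq);
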
index ac6078a34939426a27e272f970ecca99a6ed0f92..98d656bdb42b7c0226203ccfedb178c1fdb6fa0c 100644 (file)
@@ -2551,8 +2551,8 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
        bool use_debt, ioc_locked;
        unsigned long flags;
 
-       /* bypass IOs if disabled or for root cgroup */
-       if (!ioc->enabled || !iocg->level)
+       /* bypass IOs if disabled, still initializing, or for root cgroup */
+       if (!ioc->enabled || !iocg || !iocg->level)
                return;
 
        /* calculate the absolute vtime cost */
@@ -2679,14 +2679,14 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
                           struct bio *bio)
 {
        struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
-       struct ioc *ioc = iocg->ioc;
+       struct ioc *ioc = rqos_to_ioc(rqos);
        sector_t bio_end = bio_end_sector(bio);
        struct ioc_now now;
        u64 vtime, abs_cost, cost;
        unsigned long flags;
 
-       /* bypass if disabled or for root cgroup */
-       if (!ioc->enabled || !iocg->level)
+       /* bypass if disabled, still initializing, or for root cgroup */
+       if (!ioc->enabled || !iocg || !iocg->level)
                return;
 
        abs_cost = calc_vtime_cost(bio, iocg, true);
@@ -2863,6 +2863,12 @@ static int blk_iocost_init(struct request_queue *q)
        ioc_refresh_params(ioc, true);
        spin_unlock_irq(&ioc->lock);
 
+       /*
+        * rqos must be added before activation so that iocg_pd_init() can
+        * look up the ioc from q. This means the rqos methods may run
+        * before policy activation completes, so they can't assume the
+        * target bio has an iocg associated and must test for NULL iocg.
+        */
        rq_qos_add(q, rqos);
        ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
        if (ret) {
index 3094542e12ae0fa468f242a897b1d08b35fbe83f..4de03da9a624b8596008644ac1d3934b8957722e 100644 (file)
@@ -129,6 +129,7 @@ static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(PCI_P2PDMA),
        QUEUE_FLAG_NAME(ZONE_RESETALL),
        QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
+       QUEUE_FLAG_NAME(NOWAIT),
 };
 #undef QUEUE_FLAG_NAME
 
@@ -245,6 +246,7 @@ static const char *const hctx_flag_name[] = {
        HCTX_FLAG_NAME(BLOCKING),
        HCTX_FLAG_NAME(NO_SCHED),
        HCTX_FLAG_NAME(STACKING),
+       HCTX_FLAG_NAME(TAG_HCTX_SHARED),
 };
 #undef HCTX_FLAG_NAME
 
@@ -297,7 +299,6 @@ static const char *const rqf_name[] = {
        RQF_NAME(MIXED_MERGE),
        RQF_NAME(MQ_INFLIGHT),
        RQF_NAME(DONTPREP),
-       RQF_NAME(PREEMPT),
        RQF_NAME(FAILED),
        RQF_NAME(QUIET),
        RQF_NAME(ELVPRIV),
index c338c9bc5a2c53d22331f45801a3b5dfbc299dda..f285a9123a8b081deeae72282306948aeec928b4 100644 (file)
@@ -294,8 +294,8 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
        rq->mq_hctx = data->hctx;
        rq->rq_flags = 0;
        rq->cmd_flags = data->cmd_flags;
-       if (data->flags & BLK_MQ_REQ_PREEMPT)
-               rq->rq_flags |= RQF_PREEMPT;
+       if (data->flags & BLK_MQ_REQ_PM)
+               rq->rq_flags |= RQF_PM;
        if (blk_queue_io_stat(data->q))
                rq->rq_flags |= RQF_IO_STAT;
        INIT_LIST_HEAD(&rq->queuelist);
index b85234d758f7b2d6d75734fe9d74a0e5e03bdcc2..17bd020268d421434a3724ad63552f2df49da0f4 100644 (file)
@@ -67,6 +67,10 @@ int blk_pre_runtime_suspend(struct request_queue *q)
 
        WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);
 
+       spin_lock_irq(&q->queue_lock);
+       q->rpm_status = RPM_SUSPENDING;
+       spin_unlock_irq(&q->queue_lock);
+
        /*
         * Increase the pm_only counter before checking whether any
         * non-PM blk_queue_enter() calls are in progress to avoid that any
@@ -89,15 +93,14 @@ int blk_pre_runtime_suspend(struct request_queue *q)
        /* Switch q_usage_counter back to per-cpu mode. */
        blk_mq_unfreeze_queue(q);
 
-       spin_lock_irq(&q->queue_lock);
-       if (ret < 0)
+       if (ret < 0) {
+               spin_lock_irq(&q->queue_lock);
+               q->rpm_status = RPM_ACTIVE;
                pm_runtime_mark_last_busy(q->dev);
-       else
-               q->rpm_status = RPM_SUSPENDING;
-       spin_unlock_irq(&q->queue_lock);
+               spin_unlock_irq(&q->queue_lock);
 
-       if (ret)
                blk_clear_pm_only(q);
+       }
 
        return ret;
 }
index ea5507d23e75976e0c234c1a4951fd443e86811c..a2283cc9f716dc89622a5966fd6e894d8ddefee9 100644 (file)
@@ -6,11 +6,14 @@
 #include <linux/pm_runtime.h>
 
 #ifdef CONFIG_PM
-static inline void blk_pm_request_resume(struct request_queue *q)
+static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
 {
-       if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
-                      q->rpm_status == RPM_SUSPENDING))
-               pm_request_resume(q->dev);
+       if (!q->dev || !blk_queue_pm_only(q))
+               return 1;       /* Nothing to do */
+       if (pm && q->rpm_status != RPM_SUSPENDED)
+               return 1;       /* Request allowed */
+       pm_request_resume(q->dev);
+       return 0;
 }
 
 static inline void blk_pm_mark_last_busy(struct request *rq)
@@ -44,8 +47,9 @@ static inline void blk_pm_put_request(struct request *rq)
                --rq->q->nr_pending;
 }
 #else
-static inline void blk_pm_request_resume(struct request_queue *q)
+static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
 {
+       return 1;
 }
 
 static inline void blk_pm_mark_last_busy(struct request *rq)
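For reference, this helper replaces blk_pm_request_resume() plus an open-coded pm-only check in the blk_queue_enter() wait condition, which now reads (quoted from the blk-core.c hunk above):

    wait_event(q->mq_freeze_wq,
               (!q->mq_freeze_depth &&
                blk_pm_resume_queue(pm, q)) ||
               blk_queue_dying(q));

A non-PM request against a suspending or suspended queue gets 0 back after the helper has kicked pm_request_resume(), so the caller sleeps until the resume completes.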
index 73faec438e49a88d308bb438c9e170dcacdc986e..419548e92d82f6216995293928b1c0e2f63f1d4c 100644 (file)
@@ -246,15 +246,18 @@ struct block_device *disk_part_iter_next(struct disk_part_iter *piter)
                part = rcu_dereference(ptbl->part[piter->idx]);
                if (!part)
                        continue;
+               piter->part = bdgrab(part);
+               if (!piter->part)
+                       continue;
                if (!bdev_nr_sectors(part) &&
                    !(piter->flags & DISK_PITER_INCL_EMPTY) &&
                    !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
-                     piter->idx == 0))
+                     piter->idx == 0)) {
+                       bdput(piter->part);
+                       piter->part = NULL;
                        continue;
+               }
 
-               piter->part = bdgrab(part);
-               if (!piter->part)
-                       continue;
                piter->idx += inc;
                break;
        }
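The reorder pins the partition before inspecting it: reading bdev_nr_sectors() on an unreferenced partition races with concurrent deletion. The idiom in isolation, as a sketch (should_skip() is a placeholder for the emptiness test in the hunk):

    part = rcu_dereference(ptbl->part[idx]);
    if (!part)
            continue;
    piter->part = bdgrab(part);       /* take the reference first */
    if (!piter->part)
            continue;                 /* already being torn down */
    if (should_skip(piter, part)) {
            bdput(piter->part);       /* balance the grab on the skip path */
            piter->part = NULL;
            continue;
    }
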
index 511932aa94a6f5a585c52fbf45d7575e132e83cc..0959613560b9e4c3d817fd5363fec12923ec9f33 100644 (file)
@@ -354,7 +354,7 @@ static uint32_t derive_pub_key(const void *pub_key, uint32_t len, uint8_t *buf)
        memcpy(cur, e, sizeof(e));
        cur += sizeof(e);
        /* Zero parameters to satisfy set_pub_key ABI. */
-       memset(cur, 0, SETKEY_PARAMS_SIZE);
+       memzero_explicit(cur, SETKEY_PARAMS_SIZE);
 
        return cur - buf;
 }
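The swap to memzero_explicit() is about dead-store elimination: a plain memset() on memory the compiler can prove is never read again may be optimized away, leaving key material in place. A sketch of what the helper does (simplified from lib/string.c):

    void memzero_explicit_sketch(void *s, size_t count)
    {
            memset(s, 0, count);
            barrier_data(s);   /* compiler barrier tied to s keeps the store */
    }
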
index 8892908ad58ce40c2b868c48c957cc49c49d1ad9..788a4ba1e2e747de90b600b56a5b610ba4eb935b 100644 (file)
@@ -356,7 +356,8 @@ int public_key_verify_signature(const struct public_key *pkey,
        if (ret)
                goto error_free_key;
 
-       if (strcmp(sig->pkey_algo, "sm2") == 0 && sig->data_size) {
+       if (sig->pkey_algo && strcmp(sig->pkey_algo, "sm2") == 0 &&
+           sig->data_size) {
                ret = cert_sig_digest_update(sig, tfm);
                if (ret)
                        goto error_free_key;
index d56b8603dec95a9fca11c4fb2c5dafdf4f5d2fcf..96f80c8f8e30484c2c1b656d91d12a0e82eeee9f 100644 (file)
@@ -39,7 +39,8 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
        struct ecdh params;
        unsigned int ndigits;
 
-       if (crypto_ecdh_decode_key(buf, len, &params) < 0)
+       if (crypto_ecdh_decode_key(buf, len, &params) < 0 ||
+           params.key_size > sizeof(ctx->private_key))
                return -EINVAL;
 
        ndigits = ecdh_supported_curve(params.curve_id);
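The added clause rejects a decoded key_size larger than the fixed private_key buffer before anything is copied. The validate-before-copy shape, as a sketch (struct ecdh and its fields are the real API; the context struct here is illustrative):

    struct ctx_sketch {
            u64 private_key[4];                 /* fixed-size destination */
    };

    static int set_secret_sketch(struct ctx_sketch *ctx,
                                 const struct ecdh *params)
    {
            if (params->key_size > sizeof(ctx->private_key))
                    return -EINVAL;             /* oversize key: refuse */
            memcpy(ctx->private_key, params->key, params->key_size);
            return 0;
    }
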
index eacbf4f939900fe4f280f053c5ee1ac45a75a0c2..8f899f898ec9f903734693cd6f4ba8a593bbcc14 100644 (file)
@@ -107,6 +107,8 @@ do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
        preempt_enable();
 
        // bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s]
+       if (!min)
+               min = 1;
        speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
        tmpl->speed = speed;
 
index edf1558c110521e0f364b4079a720f441cecc6d8..ebcf534514be39b6637130c7d527d5cf21bd1b71 100644 (file)
@@ -395,9 +395,6 @@ config ACPI_CONTAINER
 
          This helps support hotplug of nodes, CPUs, and memory.
 
-         To compile this driver as a module, choose M here:
-         the module will be called container.
-
 config ACPI_HOTPLUG_MEMORY
        bool "Memory Hotplug"
        depends on MEMORY_HOTPLUG
@@ -411,9 +408,6 @@ config ACPI_HOTPLUG_MEMORY
          removing memory devices at runtime, you need not enable
          this driver.
 
-         To compile this driver as a module, choose M here:
-         the module will be called acpi_memhotplug.
-
 config ACPI_HOTPLUG_IOAPIC
        bool
        depends on PCI
index cb229e24c56375b0a89255667436abc46fa9b0d3..e6a5d997241c43d803d9758b01328db8b6cdb0cc 100644 (file)
@@ -97,7 +97,7 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
 extern struct list_head acpi_bus_id_list;
 
 struct acpi_device_bus_id {
-       char bus_id[15];
+       const char *bus_id;
        unsigned int instance_no;
        struct list_head node;
 };
index 80b668c80073a58417e6bd6a2ad0d1ddc0f28d31..58ff36340cd7c595521e753897da6d2dfc2a91be 100644 (file)
@@ -486,6 +486,7 @@ static void acpi_device_del(struct acpi_device *device)
                                acpi_device_bus_id->instance_no--;
                        else {
                                list_del(&acpi_device_bus_id->node);
+                               kfree_const(acpi_device_bus_id->bus_id);
                                kfree(acpi_device_bus_id);
                        }
                        break;
@@ -674,7 +675,14 @@ int acpi_device_add(struct acpi_device *device,
        }
        if (!found) {
                acpi_device_bus_id = new_bus_id;
-               strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
+               acpi_device_bus_id->bus_id =
+                       kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
+               if (!acpi_device_bus_id->bus_id) {
+                       pr_err(PREFIX "Memory allocation error for bus id\n");
+                       result = -ENOMEM;
+                       goto err_free_new_bus_id;
+               }
+
                acpi_device_bus_id->instance_no = 0;
                list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
        }
@@ -709,6 +717,11 @@ int acpi_device_add(struct acpi_device *device,
        if (device->parent)
                list_del(&device->node);
        list_del(&device->wakeup_list);
+
+ err_free_new_bus_id:
+       if (!found)
+               kfree(new_bus_id);
+
        mutex_unlock(&acpi_device_lock);
 
  err_detach:
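bus_id is now a pointer, filled with kstrdup_const() and released with kfree_const(). The pair is const-aware: when the source string lives in kernel .rodata, kstrdup_const() simply returns the pointer and kfree_const() becomes a no-op; otherwise they behave like kstrdup()/kfree(). Usage sketch:

    const char *id = kstrdup_const(acpi_device_hid(device), GFP_KERNEL);

    if (!id)
            return -ENOMEM;
    /* ... */
    kfree_const(id);   /* no-op for .rodata strings, kfree() for copies */
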
index 25fea34b544c6911eb9489b2d48ff31ad7f80b25..2b69536cdccbafdf9f50e142e9374a54b541bbfa 100644 (file)
@@ -105,18 +105,8 @@ static void lpi_device_get_constraints_amd(void)
 
        for (i = 0; i < out_obj->package.count; i++) {
                union acpi_object *package = &out_obj->package.elements[i];
-               struct lpi_device_info_amd info = { };
 
-               if (package->type == ACPI_TYPE_INTEGER) {
-                       switch (i) {
-                       case 0:
-                               info.revision = package->integer.value;
-                               break;
-                       case 1:
-                               info.count = package->integer.value;
-                               break;
-                       }
-               } else if (package->type == ACPI_TYPE_PACKAGE) {
+               if (package->type == ACPI_TYPE_PACKAGE) {
                        lpi_constraints_table = kcalloc(package->package.count,
                                                        sizeof(*lpi_constraints_table),
                                                        GFP_KERNEL);
@@ -135,12 +125,10 @@ static void lpi_device_get_constraints_amd(void)
 
                                for (k = 0; k < info_obj->package.count; ++k) {
                                        union acpi_object *obj = &info_obj->package.elements[k];
-                                       union acpi_object *obj_new;
 
                                        list = &lpi_constraints_table[lpi_constraints_table_size];
                                        list->min_dstate = -1;
 
-                                       obj_new = &obj[k];
                                        switch (k) {
                                        case 0:
                                                dev_info.enabled = obj->integer.value;
index 65a3886f68c9e391722b99d0ebe4d45fc3de7fe9..5f0472c18bcbd791bbc74a5aff8fe79ff2a76fd1 100644 (file)
@@ -3607,7 +3607,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
 
        if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) {
                printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev));
-               return err;
+               goto err_out_disable_pdev;
        }
 
        card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);
index 25e08e5f40bd93afe735379abfd04ec9ed160ce6..14f1658167425a5d20f26cd3d73bcf4ad1a17948 100644 (file)
@@ -4414,6 +4414,12 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
  *
  * Set the device's firmware node pointer to @fwnode, but if a secondary
  * firmware node of the device is present, preserve it.
+ *
+ * Valid fwnode cases are:
+ *  - primary --> secondary --> -ENODEV
+ *  - primary --> NULL
+ *  - secondary --> -ENODEV
+ *  - NULL
  */
 void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
 {
@@ -4432,8 +4438,9 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
        } else {
                if (fwnode_is_primary(fn)) {
                        dev->fwnode = fn->secondary;
+                       /* Set fn->secondary = NULL, so fn remains the primary fwnode */
                        if (!(parent && fn == parent->fwnode))
-                               fn->secondary = ERR_PTR(-ENODEV);
+                               fn->secondary = NULL;
                } else {
                        dev->fwnode = NULL;
                }
index 8dfac7f3ed7aa49ca2db435fb295d70a34bc2730..ff2ee87987c7e6b194588713f19a38fbcacd0308 100644 (file)
@@ -582,8 +582,12 @@ void regmap_debugfs_init(struct regmap *map)
                devname = dev_name(map->dev);
 
        if (name) {
-               map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+               if (!map->debugfs_name) {
+                       map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
                                              devname, name);
+                       if (!map->debugfs_name)
+                               return;
+               }
                name = map->debugfs_name;
        } else {
                name = devname;
@@ -591,9 +595,10 @@ void regmap_debugfs_init(struct regmap *map)
 
        if (!strcmp(name, "dummy")) {
                kfree(map->debugfs_name);
-
                map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
                                                dummy_index);
+               if (!map->debugfs_name)
+                       return;
                name = map->debugfs_name;
                dummy_index++;
        }
index 262326973ee01a2ce2f233271833c2d1d685e4ff..583b671b1d2d2b0bc1ebe64c611a7d3397227f13 100644 (file)
@@ -445,6 +445,7 @@ config BLK_DEV_RBD
 config BLK_DEV_RSXX
        tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
        depends on PCI
+       select CRC32
        help
          Device driver for IBM's high speed PCIe SSD
          storage device: Flash Adapter 900GB Full Height.
index 4b6d3d816d1f555977ae98559838455aeda3b252..2ff05a0d26461e7aa26cc322d2237c0f8c153fda 100644 (file)
@@ -7,6 +7,7 @@ config BLK_DEV_RNBD_CLIENT
        tristate "RDMA Network Block Device driver client"
        depends on INFINIBAND_RTRS_CLIENT
        select BLK_DEV_RNBD
+       select SG_POOL
        help
          RNBD client is a network block device driver using rdma transport.
 
index 1773c0aa0bd436f8446a84530504ba941e6a7b52..080f58a5400ada6017f8b71e00599bf473778a87 100644 (file)
@@ -90,3 +90,4 @@ Kleber Souza <kleber.souza@profitbricks.com>
 Lutz Pogrell <lutz.pogrell@cloud.ionos.com>
 Milind Dumbare <Milind.dumbare@gmail.com>
 Roman Penyaev <roman.penyaev@profitbricks.com>
+Swapnil Ingle <ingleswapnil@gmail.com>
index 96e3f9fe82418d00e1fe4a13e564ead1e4c79ab2..45a4700766524bd552ee84c18655abb5a2b1e922 100644 (file)
@@ -375,12 +375,19 @@ static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
        init_waitqueue_head(&iu->comp.wait);
        iu->comp.errno = INT_MAX;
 
+       if (sg_alloc_table(&iu->sgt, 1, GFP_KERNEL)) {
+               rnbd_put_permit(sess, permit);
+               kfree(iu);
+               return NULL;
+       }
+
        return iu;
 }
 
 static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
 {
        if (atomic_dec_and_test(&iu->refcount)) {
+               sg_free_table(&iu->sgt);
                rnbd_put_permit(sess, iu->permit);
                kfree(iu);
        }
@@ -487,8 +494,6 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
        iu->buf = NULL;
        iu->dev = dev;
 
-       sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
-
        msg.hdr.type    = cpu_to_le16(RNBD_MSG_CLOSE);
        msg.device_id   = cpu_to_le32(device_id);
 
@@ -502,7 +507,6 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
                err = errno;
        }
 
-       sg_free_table(&iu->sgt);
        rnbd_put_iu(sess, iu);
        return err;
 }
@@ -575,7 +579,6 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
        iu->buf = rsp;
        iu->dev = dev;
 
-       sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
        sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
 
        msg.hdr.type    = cpu_to_le16(RNBD_MSG_OPEN);
@@ -594,7 +597,6 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
                err = errno;
        }
 
-       sg_free_table(&iu->sgt);
        rnbd_put_iu(sess, iu);
        return err;
 }
@@ -622,8 +624,6 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
 
        iu->buf = rsp;
        iu->sess = sess;
-
-       sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
        sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
 
        msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
@@ -650,7 +650,6 @@ put_iu:
        } else {
                err = errno;
        }
-       sg_free_table(&iu->sgt);
        rnbd_put_iu(sess, iu);
        return err;
 }
@@ -1698,7 +1697,8 @@ static void rnbd_destroy_sessions(void)
         */
 
        list_for_each_entry_safe(sess, sn, &sess_list, list) {
-               WARN_ON(!rnbd_clt_get_sess(sess));
+               if (!rnbd_clt_get_sess(sess))
+                       continue;
                close_rtrs(sess);
                list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
                        /*
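The WARN_ON() variant still went on to use a session whose refcount had already dropped to zero. With a kref_get_unless_zero()-style try-get, the failing case simply has to be skipped, sketched here with placeholder helpers:

    list_for_each_entry_safe(sess, sn, &sess_list, list) {
            if (!try_get_sess(sess))   /* try_get_sess(): placeholder try-get */
                    continue;          /* dying session: its release will run */
            close_and_cleanup(sess);   /* placeholder for the real teardown */
            put_sess(sess);
    }
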
index b8e44331e4944e6869a90d2ef99d373b69dffdf6..a6a68d44f517ceae67b1069f9e54df013551bc28 100644 (file)
@@ -338,10 +338,12 @@ static int rnbd_srv_link_ev(struct rtrs_srv *rtrs,
 
 void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
 {
-       mutex_lock(&sess_dev->sess->lock);
-       rnbd_srv_destroy_dev_session_sysfs(sess_dev);
-       mutex_unlock(&sess_dev->sess->lock);
+       struct rnbd_srv_session *sess = sess_dev->sess;
+
        sess_dev->keep_id = true;
+       mutex_lock(&sess->lock);
+       rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+       mutex_unlock(&sess->lock);
 }
 
 static int process_msg_close(struct rtrs_srv *rtrs,
index 37244a7e68c229d478e36389efbfceba7cec5306..9cf249c344d9e415877c2f1a8b89ad1fa4d5f958 100644 (file)
@@ -1256,6 +1256,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
        { TEGRA30_CLK_I2S3_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
        { TEGRA30_CLK_I2S4_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
        { TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+       { TEGRA30_CLK_HDA, TEGRA30_CLK_PLL_P, 102000000, 0 },
+       { TEGRA30_CLK_HDA2CODEC_2X, TEGRA30_CLK_PLL_P, 48000000, 0 },
        /* must be the last entry */
        { TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
 };
index 6e23376548ced1196516f600ddf32529884fda85..be05e038d956c75bd910d4651e9fb08faf5ffa90 100644 (file)
@@ -76,11 +76,6 @@ static inline int ceiling_fp(int32_t x)
        return ret;
 }
 
-static inline int32_t percent_fp(int percent)
-{
-       return div_fp(percent, 100);
-}
-
 static inline u64 mul_ext_fp(u64 x, u64 y)
 {
        return (x * y) >> EXT_FRAC_BITS;
@@ -91,11 +86,6 @@ static inline u64 div_ext_fp(u64 x, u64 y)
        return div64_u64(x << EXT_FRAC_BITS, y);
 }
 
-static inline int32_t percent_ext_fp(int percent)
-{
-       return div_ext_fp(percent, 100);
-}
-
 /**
  * struct sample -     Store performance sample
  * @core_avg_perf:     Ratio of APERF/MPERF which is the actual average
@@ -2653,12 +2643,13 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
                                      unsigned long capacity)
 {
        struct cpudata *cpu = all_cpu_data[cpunum];
+       u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
        int old_pstate = cpu->pstate.current_pstate;
        int cap_pstate, min_pstate, max_pstate, target_pstate;
 
        update_turbo_state();
-       cap_pstate = global.turbo_disabled ? cpu->pstate.max_pstate :
-                                            cpu->pstate.turbo_pstate;
+       cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
+                                            HWP_HIGHEST_PERF(hwp_cap);
 
        /* Optimization: Avoid unnecessary divisions. */
 
@@ -3086,7 +3077,6 @@ static int __init intel_pstate_init(void)
                        intel_pstate.attr = hwp_cpufreq_attrs;
                        intel_cpufreq.attr = hwp_cpufreq_attrs;
                        intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
-                       intel_cpufreq.fast_switch = NULL;
                        intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf;
                        if (!default_driver)
                                default_driver = &intel_pstate;
index 0acc9e241cd7d6e2688973fc1f13f13d7198c964..b9ccb6a3dad98bdf95f2a0f3f255ea12fda95c67 100644 (file)
@@ -878,9 +878,9 @@ static int get_transition_latency(struct powernow_k8_data *data)
 
 /* Take a frequency, and issue the fid/vid transition command */
 static int transition_frequency_fidvid(struct powernow_k8_data *data,
-               unsigned int index)
+               unsigned int index,
+               struct cpufreq_policy *policy)
 {
-       struct cpufreq_policy *policy;
        u32 fid = 0;
        u32 vid = 0;
        int res;
@@ -912,9 +912,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
        freqs.old = find_khz_freq_from_fid(data->currfid);
        freqs.new = find_khz_freq_from_fid(fid);
 
-       policy = cpufreq_cpu_get(smp_processor_id());
-       cpufreq_cpu_put(policy);
-
        cpufreq_freq_transition_begin(policy, &freqs);
        res = transition_fid_vid(data, fid, vid);
        cpufreq_freq_transition_end(policy, &freqs, res);
@@ -969,7 +966,7 @@ static long powernowk8_target_fn(void *arg)
 
        powernow_k8_acpi_pst_values(data, newstate);
 
-       ret = transition_frequency_fidvid(data, newstate);
+       ret = transition_frequency_fidvid(data, newstate, pol);
 
        if (ret) {
                pr_err("transition frequency failed\n");
index bbd51703e738b972a9437476e8cb4b60b31f5c8f..e535f28a80283e36db6ca9d1a9d484078ca40c0d 100644 (file)
@@ -366,6 +366,7 @@ if CRYPTO_DEV_OMAP
 config CRYPTO_DEV_OMAP_SHAM
        tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
        depends on ARCH_OMAP2PLUS
+       select CRYPTO_ENGINE
        select CRYPTO_SHA1
        select CRYPTO_MD5
        select CRYPTO_SHA256
index e63684d4cd904149db83cff229226f4b97333fe1..9ad6397aaa97e10b580a23b54836162e765ae7ff 100644 (file)
@@ -76,10 +76,6 @@ static void dma_buf_release(struct dentry *dentry)
 
        dmabuf->ops->release(dmabuf);
 
-       mutex_lock(&db_list.lock);
-       list_del(&dmabuf->list_node);
-       mutex_unlock(&db_list.lock);
-
        if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
                dma_resv_fini(dmabuf->resv);
 
@@ -88,6 +84,22 @@ static void dma_buf_release(struct dentry *dentry)
        kfree(dmabuf);
 }
 
+static int dma_buf_file_release(struct inode *inode, struct file *file)
+{
+       struct dma_buf *dmabuf;
+
+       if (!is_dma_buf_file(file))
+               return -EINVAL;
+
+       dmabuf = file->private_data;
+
+       mutex_lock(&db_list.lock);
+       list_del(&dmabuf->list_node);
+       mutex_unlock(&db_list.lock);
+
+       return 0;
+}
+
 static const struct dentry_operations dma_buf_dentry_ops = {
        .d_dname = dmabuffs_dname,
        .d_release = dma_buf_release,
@@ -413,6 +425,7 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
 }
 
 static const struct file_operations dma_buf_fops = {
+       .release        = dma_buf_file_release,
        .mmap           = dma_buf_mmap_internal,
        .llseek         = dma_buf_llseek,
        .poll           = dma_buf_poll,
index 3c4e343011721e336869567f5292959ec634b1a1..364fc2f3e499549cd3eb89673b1a3dffd8af7977 100644 (file)
@@ -251,6 +251,9 @@ static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
                buffer->vaddr = NULL;
        }
 
+       /* free page list */
+       kfree(buffer->pages);
+       /* release memory */
        cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
        kfree(buffer);
 }
index b971505b87152398cd76a16a8fb3a05fb9767c89..08d71dafa001578b1ab96d422b569555432c9cad 100644 (file)
@@ -86,12 +86,12 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
 
        if (desc->chunk) {
                /* Create and add new element into the linked list */
-               desc->chunks_alloc++;
-               list_add_tail(&chunk->list, &desc->chunk->list);
                if (!dw_edma_alloc_burst(chunk)) {
                        kfree(chunk);
                        return NULL;
                }
+               desc->chunks_alloc++;
+               list_add_tail(&chunk->list, &desc->chunk->list);
        } else {
                /* List head */
                chunk->burst = NULL;
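The reorder makes the chunk fully initialized before it becomes reachable: previously a failed burst allocation freed a chunk that was already linked into the descriptor's list, leaving a dangling node behind. The publish-last shape, as a sketch with a placeholder for the burst allocator:

    chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
    if (!chunk)
            return NULL;
    if (!alloc_burst_sketch(chunk)) {  /* placeholder for dw_edma_alloc_burst */
            kfree(chunk);              /* never published: plain kfree is safe */
            return NULL;
    }
    desc->chunks_alloc++;
    list_add_tail(&chunk->list, &desc->chunk->list);  /* publish last */
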
index 266423a2cabc7ddfa773d7e7e198f904fa77069c..4dbb03c545e48abaff38677818be3b579cdbff5e 100644 (file)
@@ -434,7 +434,7 @@ int idxd_register_driver(void)
        return 0;
 
 drv_fail:
-       for (; i > 0; i--)
+       while (--i >= 0)
                driver_unregister(&idxd_drvs[i]->drv);
        return rc;
 }
@@ -1840,7 +1840,7 @@ int idxd_register_bus_type(void)
        return 0;
 
 bus_err:
-       for (; i > 0; i--)
+       while (--i >= 0)
                bus_unregister(idxd_bus_types[i]);
        return rc;
 }
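Both loops unwind after a registration failure at index i, where exactly entries 0..i-1 succeeded. The old for (; i > 0; i--) unregistered the failed entry i and never reached entry 0; while (--i >= 0) walks i-1 down to 0. The idiom in isolation (register_one()/unregister_one() are placeholders):

    int register_all_sketch(int n)
    {
            int i, rc;

            for (i = 0; i < n; i++) {
                    rc = register_one(i);
                    if (rc)
                            goto unwind;
            }
            return 0;

    unwind:
            while (--i >= 0)           /* i-1 down to 0: only the successes */
                    unregister_one(i);
            return rc;
    }
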
index f133ae8dece16b47827b1872cdd7c4eaa4f0e1c3..6ad8afbb95f2b3d4dc33cda0d4bb4bbd1e2550d8 100644 (file)
@@ -1007,6 +1007,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
        return 0;
 
 err_free:
+       mtk_hsdma_hw_deinit(hsdma);
        of_dma_controller_free(pdev->dev.of_node);
 err_unregister:
        dma_async_device_unregister(dd);
index 584c931e807af3a824a1bd0adefec70195c7e6bc..d29d01e730aa09171eecc60cfd298943b9783c9a 100644 (file)
@@ -350,7 +350,7 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
 
        ret = dma_async_device_register(ddev);
        if (ret)
-               return ret;
+               goto disable_xdmac;
 
        ret = of_dma_controller_register(dev->of_node,
                                         of_dma_simple_xlate, mdev);
@@ -363,6 +363,8 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
 
 unregister_dmac:
        dma_async_device_unregister(ddev);
+disable_xdmac:
+       disable_xdmac(mdev);
        return ret;
 }
 
index d5773d474d8f5c04b382959e648413636239bbd3..88579857ca1d6c08a342c606001188a56157cf03 100644 (file)
@@ -630,7 +630,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
                             GFP_NOWAIT);
 
        if (!async_desc)
-               goto err_out;
+               return NULL;
 
        if (flags & DMA_PREP_FENCE)
                async_desc->flags |= DESC_FLAG_NWD;
@@ -670,10 +670,6 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
        }
 
        return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
-
-err_out:
-       kfree(async_desc);
-       return NULL;
 }
 
 /**
index d2334f535de2a16f7312eb0da0484b9cd9363bb9..1a0bf6b0567a55e6d4ec009daae813cf83eb78dc 100644 (file)
@@ -1416,7 +1416,7 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
        len = 1 << bit;
        ring->alloc_size = (len + (len - 1));
        dev_dbg(gpii->gpi_dev->dev,
-               "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
+               "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%zu\n",
                  elements, el_size, (elements * el_size), len,
                  ring->alloc_size);
 
@@ -1424,7 +1424,7 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
                                               ring->alloc_size,
                                               &ring->dma_handle, GFP_KERNEL);
        if (!ring->pre_aligned) {
-               dev_err(gpii->gpi_dev->dev, "could not alloc size:%lu mem for ring\n",
+               dev_err(gpii->gpi_dev->dev, "could not alloc size:%zu mem for ring\n",
                        ring->alloc_size);
                return -ENOMEM;
        }
@@ -1444,8 +1444,8 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
        smp_wmb();
 
        dev_dbg(gpii->gpi_dev->dev,
-               "phy_pre:0x%0llx phy_alig:0x%0llx len:%u el_size:%u elements:%u\n",
-               ring->dma_handle, ring->phys_addr, ring->len,
+               "phy_pre:%pad phy_alig:%pa len:%u el_size:%u elements:%u\n",
+               &ring->dma_handle, &ring->phys_addr, ring->len,
                ring->el_size, ring->elements);
 
        return 0;
@@ -1948,7 +1948,7 @@ static int gpi_ch_init(struct gchan *gchan)
        return ret;
 
 error_start_chan:
-       for (i = i - 1; i >= 0; i++) {
+       for (i = i - 1; i >= 0; i--) {
                gpi_stop_chan(&gpii->gchan[i]);
                gpi_send_cmd(gpii, gchan, GPI_CH_CMD_RESET);
        }
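The format fixes follow the kernel's printk rules: size_t always prints with %zu, and dma_addr_t has no fixed width across configs, so it goes through the %pad extension, which takes a pointer to the value. Combined usage, with names as in the hunk above:

    dev_dbg(gpii->gpi_dev->dev, "ring: dma %pad, size %zu\n",
            &ring->dma_handle, ring->alloc_size);
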
index e4637ec786d396fb58907567c616e5603fe05a1a..36ba8b43e78deef2f2bde459b2f064205fc951cc 100644 (file)
 #define STM32_MDMA_MAX_CHANNELS                63
 #define STM32_MDMA_MAX_REQUESTS                256
 #define STM32_MDMA_MAX_BURST           128
-#define STM32_MDMA_VERY_HIGH_PRIORITY  0x11
+#define STM32_MDMA_VERY_HIGH_PRIORITY  0x3
 
 enum stm32_mdma_trigger_mode {
        STM32_MDMA_BUFFER,
index 87157cbae1b8e782b414fd768f330b878b5b016d..298460438bb4d86ec1e2df62bc8b960b8fbf31c9 100644 (file)
@@ -4698,9 +4698,9 @@ static int pktdma_setup_resources(struct udma_dev *ud)
                ud->tchan_tpl.levels = 1;
        }
 
-       ud->tchan_tpl.levels = ud->tchan_tpl.levels;
-       ud->tchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
-       ud->tchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
+       ud->rchan_tpl.levels = ud->tchan_tpl.levels;
+       ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
+       ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
 
        ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
                                           sizeof(unsigned long), GFP_KERNEL);
index 22faea653ea82010c4411530c4e08cb330995920..79777550a6ffc2e8db8e6fbb472f4b8fa55f9c12 100644 (file)
@@ -2781,7 +2781,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
                has_dre = false;
 
        if (!has_dre)
-               xdev->common.copy_align = fls(width - 1);
+               xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
 
        if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
            of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
@@ -2900,7 +2900,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
                                    struct device_node *node)
 {
-       int ret, i, nr_channels = 1;
+       int ret, i;
+       u32 nr_channels = 1;
 
        ret = of_property_read_u32(node, "dma-channels", &nr_channels);
        if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
@@ -3112,7 +3113,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
        }
 
        /* Register the DMA engine with the core */
-       dma_async_device_register(&xdev->common);
+       err = dma_async_device_register(&xdev->common);
+       if (err) {
+               dev_err(xdev->dev, "failed to register the dma device\n");
+               goto error;
+       }
 
        err = of_dma_controller_register(node, of_dma_xilinx_xlate,
                                         xdev);
index 306077884a6794e5709efcefea25dea0554b3a33..6107ac91db250731468f7e86b56ffbe63daca372 100644 (file)
@@ -112,6 +112,7 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
 union igp_info {
        struct atom_integrated_system_info_v1_11 v11;
        struct atom_integrated_system_info_v1_12 v12;
+       struct atom_integrated_system_info_v2_1 v21;
 };
 
 union umc_info {
@@ -209,24 +210,42 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
                if (adev->flags & AMD_IS_APU) {
                        igp_info = (union igp_info *)
                                (mode_info->atom_context->bios + data_offset);
-                       switch (crev) {
-                       case 11:
-                               mem_channel_number = igp_info->v11.umachannelnumber;
-                               /* channel width is 64 */
-                               if (vram_width)
-                                       *vram_width = mem_channel_number * 64;
-                               mem_type = igp_info->v11.memorytype;
-                               if (vram_type)
-                                       *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+                       switch (frev) {
+                       case 1:
+                               switch (crev) {
+                               case 11:
+                               case 12:
+                                       mem_channel_number = igp_info->v11.umachannelnumber;
+                                       if (!mem_channel_number)
+                                               mem_channel_number = 1;
+                                       /* channel width is 64 */
+                                       if (vram_width)
+                                               *vram_width = mem_channel_number * 64;
+                                       mem_type = igp_info->v11.memorytype;
+                                       if (vram_type)
+                                               *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+                                       break;
+                               default:
+                                       return -EINVAL;
+                               }
                                break;
-                       case 12:
-                               mem_channel_number = igp_info->v12.umachannelnumber;
-                               /* channel width is 64 */
-                               if (vram_width)
-                                       *vram_width = mem_channel_number * 64;
-                               mem_type = igp_info->v12.memorytype;
-                               if (vram_type)
-                                       *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+                       case 2:
+                               switch (crev) {
+                               case 1:
+                               case 2:
+                                       mem_channel_number = igp_info->v21.umachannelnumber;
+                                       if (!mem_channel_number)
+                                               mem_channel_number = 1;
+                                       /* channel width is 64 */
+                                       if (vram_width)
+                                               *vram_width = mem_channel_number * 64;
+                                       mem_type = igp_info->v21.memorytype;
+                                       if (vram_type)
+                                               *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+                                       break;
+                               default:
+                                       return -EINVAL;
+                               }
                                break;
                        default:
                                return -EINVAL;
index 1cb7d73f7317bd26d0d7d1e4d552333f5e9169fa..087afab67e2216c993c6fc0f7b8f1ff9294b1e03 100644 (file)
@@ -2548,11 +2548,11 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
        if (adev->gmc.xgmi.num_physical_nodes > 1)
                amdgpu_xgmi_remove_device(adev);
 
-       amdgpu_amdkfd_device_fini(adev);
-
        amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
        amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 
+       amdgpu_amdkfd_device_fini(adev);
+
        /* need to disable SMC first */
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.hw)
@@ -3034,7 +3034,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 #endif
        default:
                if (amdgpu_dc > 0)
-                       DRM_INFO("Display Core has been requested via kernel parameter "
+                       DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
                                         "but isn't supported by ASIC, ignoring\n");
                return false;
        }
index 72efd579ec5ee198ef2c100f38faefa5f60641e0..7169fb5e3d9c47f0c8fd575b11544c31cd83cdb9 100644 (file)
@@ -1085,6 +1085,8 @@ static const struct pci_device_id pciidlist[] = {
 
        /* Renoir */
        {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+       {0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+       {0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
 
        /* Navi12 */
        {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
index 523d22db094b6bdd34e2506b1ece238eea37ede0..347fec66942485414a400a4ac1ae2cb795278943 100644 (file)
@@ -563,7 +563,7 @@ static int psp_asd_load(struct psp_context *psp)
         * add workaround to bypass it for sriov now.
         * TODO: add version check to make it common
         */
-       if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw)
+       if (amdgpu_sriov_vf(psp->adev) || !psp->asd_ucode_size)
                return 0;
 
        cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
@@ -1315,8 +1315,12 @@ static int psp_hdcp_terminate(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->hdcp_context.hdcp_initialized)
-               return 0;
+       if (!psp->hdcp_context.hdcp_initialized) {
+               if (psp->hdcp_context.hdcp_shared_buf)
+                       goto out;
+               else
+                       return 0;
+       }
 
        ret = psp_hdcp_unload(psp);
        if (ret)
@@ -1324,6 +1328,7 @@ static int psp_hdcp_terminate(struct psp_context *psp)
 
        psp->hdcp_context.hdcp_initialized = false;
 
+out:
        /* free hdcp shared memory */
        amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
                              &psp->hdcp_context.hdcp_shared_mc_addr,
@@ -1462,8 +1467,12 @@ static int psp_dtm_terminate(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->dtm_context.dtm_initialized)
-               return 0;
+       if (!psp->dtm_context.dtm_initialized) {
+               if (psp->dtm_context.dtm_shared_buf)
+                       goto out;
+               else
+                       return 0;
+       }
 
        ret = psp_dtm_unload(psp);
        if (ret)
@@ -1471,6 +1480,7 @@ static int psp_dtm_terminate(struct psp_context *psp)
 
        psp->dtm_context.dtm_initialized = false;
 
+out:
        /* free hdcp shared memory */
        amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
                              &psp->dtm_context.dtm_shared_mc_addr,
@@ -2589,11 +2599,10 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
 
        switch (desc->fw_type) {
        case TA_FW_TYPE_PSP_ASD:
-               psp->asd_fw_version        = le32_to_cpu(desc->fw_version);
+               psp->asd_fw_version        = le32_to_cpu(desc->fw_version);
                psp->asd_feature_version   = le32_to_cpu(desc->fw_version);
-               psp->asd_ucode_size        = le32_to_cpu(desc->size_bytes);
+               psp->asd_ucode_size        = le32_to_cpu(desc->size_bytes);
                psp->asd_start_addr        = ucode_start_addr;
-               psp->asd_fw                = psp->ta_fw;
                break;
        case TA_FW_TYPE_PSP_XGMI:
                psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
index c136bd4497446e4b920a36618aba0cc89df51425..82e952696d24f4730936948f526d2211205cc003 100644 (file)
@@ -1518,7 +1518,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_err_handler_data *data;
        int i = 0;
-       int ret = 0;
+       int ret = 0, status;
 
        if (!con || !con->eh_data || !bps || !count)
                return -EINVAL;
@@ -1543,12 +1543,12 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
                        .size = AMDGPU_GPU_PAGE_SIZE,
                        .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
                };
-               ret = amdgpu_vram_mgr_query_page_status(
+               status = amdgpu_vram_mgr_query_page_status(
                                ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
                                data->bps[i].retired_page);
-               if (ret == -EBUSY)
+               if (status == -EBUSY)
                        (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
-               else if (ret == -ENOENT)
+               else if (status == -ENOENT)
                        (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
        }
 
index 1dd040166c635e2dc1bd0bfa30c255a847a9c5da..19d9aa76cfbfbca0db3a66f29197ba4fbaae5ced 100644 (file)
@@ -30,6 +30,7 @@
 #define EEPROM_I2C_TARGET_ADDR_VEGA20          0xA0
 #define EEPROM_I2C_TARGET_ADDR_ARCTURUS                0xA8
 #define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342   0xA0
+#define EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID   0xA0
 
 /*
  * The 2 macros below represent the actual size in bytes that
@@ -62,7 +63,8 @@
 static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
 {
        if ((adev->asic_type == CHIP_VEGA20) ||
-           (adev->asic_type == CHIP_ARCTURUS))
+           (adev->asic_type == CHIP_ARCTURUS) ||
+           (adev->asic_type == CHIP_SIENNA_CICHLID))
                return true;
 
        return false;
@@ -100,6 +102,10 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
        case CHIP_ARCTURUS:
                return __get_eeprom_i2c_addr_arct(adev, i2c_addr);
 
+       case CHIP_SIENNA_CICHLID:
+               *i2c_addr = EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID;
+               break;
+
        default:
                return false;
        }
index ba108678452551f0917d01ff67cf6c186822a171..619d34c041ee025f89a4bf6f892e04df78c64a56 100644 (file)
 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid                      0x1580
 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX     0
 
+#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh                0x0025
+#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX       1
+#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh                0x0026
+#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh_BASE_IDX       1
 #define mmSPI_CONFIG_CNTL_1_Vangogh             0x2441
 #define mmSPI_CONFIG_CNTL_1_Vangogh_BASE_IDX    1
 #define mmVGT_TF_MEMORY_BASE_HI_Vangogh          0x2261
 #define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid          0x15db
 #define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid_BASE_IDX        0
 
+#define mmGC_THROTTLE_CTRL_Sienna_Cichlid              0x2030
+#define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX     0
+
 MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
 MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
 MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -3324,6 +3331,7 @@ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
 static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
 static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev);
 static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev);
+static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev);
 
 static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
 {
@@ -7192,6 +7200,9 @@ static int gfx_v10_0_hw_init(void *handle)
        if (adev->asic_type == CHIP_SIENNA_CICHLID)
                gfx_v10_3_program_pbb_mode(adev);
 
+       if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+               gfx_v10_3_set_power_brake_sequence(adev);
+
        return r;
 }
 
@@ -7377,8 +7388,16 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 
        amdgpu_gfx_off_ctrl(adev, false);
        mutex_lock(&adev->gfx.gpu_clock_mutex);
-       clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
-               ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
+       switch (adev->asic_type) {
+       case CHIP_VANGOGH:
+               clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
+                       ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
+               break;
+       default:
+               clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
+                       ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
+               break;
+       }
        mutex_unlock(&adev->gfx.gpu_clock_mutex);
        amdgpu_gfx_off_ctrl(adev, true);
        return clock;
@@ -9169,6 +9188,31 @@ static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev)
        }
 }
 
+static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev)
+{
+       WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX,
+                    (0x1 << GRBM_GFX_INDEX__SA_BROADCAST_WRITES__SHIFT) |
+                    (0x1 << GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT) |
+                    (0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
+
+       WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL);
+       WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA,
+                    (0x1 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL__SHIFT) |
+                    (0x12 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP__SHIFT) |
+                    (0x13 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP__SHIFT) |
+                    (0xf << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_THROTTLE_PATTERN_BIT_NUMS__SHIFT));
+
+       WREG32_SOC15(GC, 0, mmGC_THROTTLE_CTRL_Sienna_Cichlid,
+                    (0x1 << GC_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT) |
+                    (0x1 << GC_THROTTLE_CTRL__PATTERN_MODE__SHIFT) |
+                    (0x5 << GC_THROTTLE_CTRL__RELEASE_STEP_INTERVAL__SHIFT));
+
+       WREG32_SOC15(GC, 0, mmDIDT_IND_INDEX, ixDIDT_SQ_THROTTLE_CTRL);
+
+       WREG32_SOC15(GC, 0, mmDIDT_IND_DATA,
+                    (0x1 << DIDT_SQ_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT));
+}
+
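
The sequence above first writes GRBM_GFX_INDEX with all broadcast bits set, so the writes that follow reach every shader engine, shader array, and instance at once. The GC_CAC and DIDT blocks are then programmed through index/data register pairs; a stripped-down sketch of that idiom, with write32() as a hypothetical stand-in for WREG32_SOC15():

    #include <stdint.h>

    extern void write32(int reg, uint32_t val); /* hypothetical MMIO accessor */

    enum { IND_INDEX, IND_DATA };

    /* Indirect write: select the block-internal register, then store the value. */
    static void indexed_write(uint32_t internal_reg, uint32_t val)
    {
            write32(IND_INDEX, internal_reg);
            write32(IND_DATA, val);
    }
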
 const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
 {
        .type = AMD_IP_BLOCK_TYPE_GFX,
index b72c8e4ca36bdf3940aab63c1b1f8965fc774523..07104a1de3082b776211bd36f7ea1eaa8e4873ae 100644 (file)
@@ -310,7 +310,7 @@ static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev)
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
-                                   !amdgpu_noretry);
+                                   !adev->gmc.noretry);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
                                    i * hub->ctx_distance, tmp);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
index d65a5339d354ac268760c7b88e74fbdb174ea0dc..3ba7bdfde65d81f8f0c91c88b93f1ce15d33cd47 100644 (file)
@@ -47,7 +47,7 @@ enum psp_gfx_crtl_cmd_id
     GFX_CTRL_CMD_ID_DISABLE_INT     = 0x00060000,   /* disable PSP-to-Gfx interrupt */
     GFX_CTRL_CMD_ID_MODE1_RST       = 0x00070000,   /* trigger the Mode 1 reset */
     GFX_CTRL_CMD_ID_GBR_IH_SET      = 0x00080000,   /* set Gbr IH_RB_CNTL registers */
-    GFX_CTRL_CMD_ID_CONSUME_CMD     = 0x000A0000,   /* send interrupt to psp for updating write pointer of vf */
+    GFX_CTRL_CMD_ID_CONSUME_CMD     = 0x00090000,   /* send interrupt to psp for updating write pointer of vf */
     GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
 
     GFX_CTRL_CMD_ID_MAX             = 0x000F0000,   /* max command ID */
index 8a23636ecc27ff8577997ce207733d7f240e7ecf..0b3516c4eefb33881c235ba44d8338c0cef2515e 100644 (file)
@@ -1239,7 +1239,8 @@ static int soc15_common_early_init(void *handle)
                break;
        case CHIP_RENOIR:
                adev->asic_funcs = &soc15_asic_funcs;
-               if (adev->pdev->device == 0x1636)
+               if ((adev->pdev->device == 0x1636) ||
+                   (adev->pdev->device == 0x164c))
                        adev->apu_flags |= AMD_APU_IS_RENOIR;
                else
                        adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
index 8cac497c2c459aa86421ddbce9eab12d6544ce03..a5640a6138cf2cb019cb00d12a08d4f6faee2ad6 100644 (file)
@@ -1040,11 +1040,14 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
                                (struct crat_subtype_iolink *)sub_type_hdr);
                if (ret < 0)
                        return ret;
-               crat_table->length += (sub_type_hdr->length * entries);
-               crat_table->total_entries += entries;
 
-               sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
-                               sub_type_hdr->length * entries);
+               if (entries) {
+                       crat_table->length += (sub_type_hdr->length * entries);
+                       crat_table->total_entries += entries;
+
+                       sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+                                       sub_type_hdr->length * entries);
+               }
 #else
                pr_info("IO link not available for non x86 platforms\n");
 #endif
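
The entries guard added above matters because the helper can legitimately report zero IO links, in which case the subtype header it would have filled in is never written, and reading sub_type_hdr->length would touch uninitialized memory. The shape of the fix, reduced to illustrative types:

    #include <stddef.h>
    #include <stdint.h>

    struct rec_hdr {                /* illustrative stand-in for a CRAT subtype */
            uint8_t type;
            uint8_t length;         /* size in bytes of one record */
    };

    /* Advance over 'entries' variable-length records. With entries == 0 the
     * header may never have been initialized, so don't dereference it. */
    static struct rec_hdr *skip_records(struct rec_hdr *hdr, size_t entries)
    {
            if (!entries)
                    return hdr;
            return (struct rec_hdr *)((char *)hdr + (size_t)hdr->length * entries);
    }
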
index 797b5d4b43e5e309e6e8d99e67ecc1daf95a272e..e509a175ed1758b2cc5dc0b8ddb8a92f164dbae8 100644 (file)
@@ -6,7 +6,7 @@ config DRM_AMD_DC
        bool "AMD DC - Enable new display engine"
        default y
        select SND_HDA_COMPONENT if SND_HDA_CORE
-       select DRM_AMD_DC_DCN if (X86 || PPC64 || (ARM64 && KERNEL_MODE_NEON)) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
+       select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
        help
          Choose this option if you want to use the new display engine
          support for AMDGPU. This adds required support for Vega and
index 519080e9a23388b0cb60b808525720134f503dd1..c6da89df055de87c89ba0ba05ed8739993a1c97b 100644 (file)
@@ -939,41 +939,6 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
 }
 #endif
 
-#ifdef CONFIG_DEBUG_FS
-static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
-{
-       dm->crc_win_x_start_property =
-               drm_property_create_range(adev_to_drm(dm->adev),
-                                         DRM_MODE_PROP_ATOMIC,
-                                         "AMD_CRC_WIN_X_START", 0, U16_MAX);
-       if (!dm->crc_win_x_start_property)
-               return -ENOMEM;
-
-       dm->crc_win_y_start_property =
-               drm_property_create_range(adev_to_drm(dm->adev),
-                                         DRM_MODE_PROP_ATOMIC,
-                                         "AMD_CRC_WIN_Y_START", 0, U16_MAX);
-       if (!dm->crc_win_y_start_property)
-               return -ENOMEM;
-
-       dm->crc_win_x_end_property =
-               drm_property_create_range(adev_to_drm(dm->adev),
-                                         DRM_MODE_PROP_ATOMIC,
-                                         "AMD_CRC_WIN_X_END", 0, U16_MAX);
-       if (!dm->crc_win_x_end_property)
-               return -ENOMEM;
-
-       dm->crc_win_y_end_property =
-               drm_property_create_range(adev_to_drm(dm->adev),
-                                         DRM_MODE_PROP_ATOMIC,
-                                         "AMD_CRC_WIN_Y_END", 0, U16_MAX);
-       if (!dm->crc_win_y_end_property)
-               return -ENOMEM;
-
-       return 0;
-}
-#endif
-
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
        struct dc_init_data init_data;
@@ -1120,10 +1085,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
                dc_init_callbacks(adev->dm.dc, &init_params);
        }
-#endif
-#ifdef CONFIG_DEBUG_FS
-       if (create_crtc_crc_properties(&adev->dm))
-               DRM_ERROR("amdgpu: failed to create crc property.\n");
 #endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR(
@@ -2386,8 +2347,7 @@ void amdgpu_dm_update_connector_after_detect(
 
                        drm_connector_update_edid_property(connector,
                                                           aconnector->edid);
-                       aconnector->num_modes = drm_add_edid_modes(connector, aconnector->edid);
-                       drm_connector_list_update(connector);
+                       drm_add_edid_modes(connector, aconnector->edid);
 
                        if (aconnector->dc_link->aux_mode)
                                drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
@@ -5334,64 +5294,12 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
        state->crc_src = cur->crc_src;
        state->cm_has_degamma = cur->cm_has_degamma;
        state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
-#ifdef CONFIG_DEBUG_FS
-       state->crc_window = cur->crc_window;
-#endif
+
        /* TODO: Duplicate dc_stream after the stream object is flattened */
 
        return &state->base;
 }
 
-#ifdef CONFIG_DEBUG_FS
-static int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
-                                           struct drm_crtc_state *crtc_state,
-                                           struct drm_property *property,
-                                           uint64_t val)
-{
-       struct drm_device *dev = crtc->dev;
-       struct amdgpu_device *adev = drm_to_adev(dev);
-       struct dm_crtc_state *dm_new_state =
-               to_dm_crtc_state(crtc_state);
-
-       if (property == adev->dm.crc_win_x_start_property)
-               dm_new_state->crc_window.x_start = val;
-       else if (property == adev->dm.crc_win_y_start_property)
-               dm_new_state->crc_window.y_start = val;
-       else if (property == adev->dm.crc_win_x_end_property)
-               dm_new_state->crc_window.x_end = val;
-       else if (property == adev->dm.crc_win_y_end_property)
-               dm_new_state->crc_window.y_end = val;
-       else
-               return -EINVAL;
-
-       return 0;
-}
-
-static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
-                                           const struct drm_crtc_state *state,
-                                           struct drm_property *property,
-                                           uint64_t *val)
-{
-       struct drm_device *dev = crtc->dev;
-       struct amdgpu_device *adev = drm_to_adev(dev);
-       struct dm_crtc_state *dm_state =
-               to_dm_crtc_state(state);
-
-       if (property == adev->dm.crc_win_x_start_property)
-               *val = dm_state->crc_window.x_start;
-       else if (property == adev->dm.crc_win_y_start_property)
-               *val = dm_state->crc_window.y_start;
-       else if (property == adev->dm.crc_win_x_end_property)
-               *val = dm_state->crc_window.x_end;
-       else if (property == adev->dm.crc_win_y_end_property)
-               *val = dm_state->crc_window.y_end;
-       else
-               return -EINVAL;
-
-       return 0;
-}
-#endif
-
 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
 {
        enum dc_irq_source irq_source;
@@ -5458,10 +5366,6 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
        .enable_vblank = dm_enable_vblank,
        .disable_vblank = dm_disable_vblank,
        .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
-#ifdef CONFIG_DEBUG_FS
-       .atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
-       .atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
-#endif
 };
 
 static enum drm_connector_status
@@ -6663,25 +6567,6 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
        return 0;
 }
 
-#ifdef CONFIG_DEBUG_FS
-static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
-                               struct amdgpu_crtc *acrtc)
-{
-       drm_object_attach_property(&acrtc->base.base,
-                                  dm->crc_win_x_start_property,
-                                  0);
-       drm_object_attach_property(&acrtc->base.base,
-                                  dm->crc_win_y_start_property,
-                                  0);
-       drm_object_attach_property(&acrtc->base.base,
-                                  dm->crc_win_x_end_property,
-                                  0);
-       drm_object_attach_property(&acrtc->base.base,
-                                  dm->crc_win_y_end_property,
-                                  0);
-}
-#endif
-
 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t crtc_index)
@@ -6729,9 +6614,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
        drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
                                   true, MAX_COLOR_LUT_ENTRIES);
        drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
-#ifdef CONFIG_DEBUG_FS
-       attach_crtc_crc_properties(dm, acrtc);
-#endif
+
        return 0;
 
 fail:
@@ -8368,7 +8251,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
         */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
-               bool configure_crc = false;
 
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 
@@ -8378,30 +8260,21 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                        dc_stream_retain(dm_new_crtc_state->stream);
                        acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
                        manage_dm_interrupts(adev, acrtc, true);
-               }
+
 #ifdef CONFIG_DEBUG_FS
-               if (new_crtc_state->active &&
-                       amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
                        /**
                         * Frontend may have changed so reapply the CRC capture
                         * settings for the stream.
                         */
                        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-                       dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
-
-                       if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
-                               if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
-                                       configure_crc = true;
-                       } else {
-                               if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
-                                       configure_crc = true;
-                       }
 
-                       if (configure_crc)
+                       if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
                                amdgpu_dm_crtc_configure_crc_source(
-                                       crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
-               }
+                                       crtc, dm_new_crtc_state,
+                                       dm_new_crtc_state->crc_src);
+                       }
 #endif
+               }
        }
 
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
index 2ee6edb3df931b5eb100722377eff805f9c19fd7..1182dafcef0228fd8818cfd82e597066a7abc3c0 100644 (file)
@@ -336,32 +336,6 @@ struct amdgpu_display_manager {
         */
        const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
 
-#ifdef CONFIG_DEBUG_FS
-       /**
-        * @crc_win_x_start_property:
-        *
-        * X start of the crc calculation window
-        */
-       struct drm_property *crc_win_x_start_property;
-       /**
-        * @crc_win_y_start_property:
-        *
-        * Y start of the crc calculation window
-        */
-       struct drm_property *crc_win_y_start_property;
-       /**
-        * @crc_win_x_end_property:
-        *
-        * X end of the crc calculation window
-        */
-       struct drm_property *crc_win_x_end_property;
-       /**
-        * @crc_win_y_end_property:
-        *
-        * Y end of the crc calculation window
-        */
-       struct drm_property *crc_win_y_end_property;
-#endif
        /**
         * @mst_encoders:
         *
@@ -448,15 +422,6 @@ struct dm_plane_state {
        struct dc_plane_state *dc_state;
 };
 
-#ifdef CONFIG_DEBUG_FS
-struct crc_rec {
-       uint16_t x_start;
-       uint16_t y_start;
-       uint16_t x_end;
-       uint16_t y_end;
-       };
-#endif
-
 struct dm_crtc_state {
        struct drm_crtc_state base;
        struct dc_stream_state *stream;
@@ -479,9 +444,6 @@ struct dm_crtc_state {
        struct dc_info_packet vrr_infopacket;
 
        int abm_level;
-#ifdef CONFIG_DEBUG_FS
-       struct crc_rec crc_window;
-#endif
 };
 
 #define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
index 7b886a779a8cabe94b9b48272869630c2fd9f42e..66cb8730586b1c0b520024bbb501b6b83e536c99 100644 (file)
@@ -81,41 +81,6 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
        return pipe_crc_sources;
 }
 
-static void amdgpu_dm_set_crc_window_default(struct dm_crtc_state *dm_crtc_state)
-{
-       dm_crtc_state->crc_window.x_start = 0;
-       dm_crtc_state->crc_window.y_start = 0;
-       dm_crtc_state->crc_window.x_end = 0;
-       dm_crtc_state->crc_window.y_end = 0;
-}
-
-bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state)
-{
-       bool ret = true;
-
-       if ((dm_crtc_state->crc_window.x_start != 0) ||
-               (dm_crtc_state->crc_window.y_start != 0) ||
-               (dm_crtc_state->crc_window.x_end != 0) ||
-               (dm_crtc_state->crc_window.y_end != 0))
-               ret = false;
-
-       return ret;
-}
-
-bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
-                                       struct dm_crtc_state *dm_old_crtc_state)
-{
-       bool ret = false;
-
-       if ((dm_new_crtc_state->crc_window.x_start != dm_old_crtc_state->crc_window.x_start) ||
-               (dm_new_crtc_state->crc_window.y_start != dm_old_crtc_state->crc_window.y_start) ||
-               (dm_new_crtc_state->crc_window.x_end != dm_old_crtc_state->crc_window.x_end) ||
-               (dm_new_crtc_state->crc_window.y_end != dm_old_crtc_state->crc_window.y_end))
-               ret = true;
-
-       return ret;
-}
-
 int
 amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
                                 size_t *values_cnt)
@@ -140,7 +105,6 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
        struct dc_stream_state *stream_state = dm_crtc_state->stream;
        bool enable = amdgpu_dm_is_valid_crc_source(source);
        int ret = 0;
-       struct crc_params *crc_window = NULL, tmp_window;
 
        /* Configuration will be deferred to stream enable. */
        if (!stream_state)
@@ -150,24 +114,8 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
 
        /* Enable CRTC CRC generation if necessary. */
        if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
-               if (!enable)
-                       amdgpu_dm_set_crc_window_default(dm_crtc_state);
-
-               if (!amdgpu_dm_crc_window_is_default(dm_crtc_state)) {
-                       crc_window = &tmp_window;
-
-                       tmp_window.windowa_x_start = dm_crtc_state->crc_window.x_start;
-                       tmp_window.windowa_y_start = dm_crtc_state->crc_window.y_start;
-                       tmp_window.windowa_x_end = dm_crtc_state->crc_window.x_end;
-                       tmp_window.windowa_y_end = dm_crtc_state->crc_window.y_end;
-                       tmp_window.windowb_x_start = dm_crtc_state->crc_window.x_start;
-                       tmp_window.windowb_y_start = dm_crtc_state->crc_window.y_start;
-                       tmp_window.windowb_x_end = dm_crtc_state->crc_window.x_end;
-                       tmp_window.windowb_y_end = dm_crtc_state->crc_window.y_end;
-               }
-
                if (!dc_stream_configure_crc(stream_state->ctx->dc,
-                                            stream_state, crc_window, enable, enable)) {
+                                            stream_state, NULL, enable, enable)) {
                        ret = -EINVAL;
                        goto unlock;
                }
index 0235bfb246e5dbed489a600a6ee051e555c122f5..f7d731797d3fc339389ace86c5b27c56b6152b51 100644 (file)
@@ -47,9 +47,6 @@ static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source
 
 /* amdgpu_dm_crc.c */
 #ifdef CONFIG_DEBUG_FS
-bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state);
-bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
-                                       struct dm_crtc_state *dm_old_crtc_state);
 int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
                                        struct dm_crtc_state *dm_crtc_state,
                                        enum amdgpu_dm_pipe_crc_source source);
index 64f515d74410315c9ef4adb2fcb0e5cec50b0f3b..f3c00f479e1cb56a89c68c2ad3e0dbc07ac70b09 100644 (file)
@@ -33,10 +33,6 @@ ifdef CONFIG_PPC64
 calcs_ccflags := -mhard-float -maltivec
 endif
 
-ifdef CONFIG_ARM64
-calcs_rcflags := -mgeneral-regs-only
-endif
-
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
index d59b380e7b7fbe3d90edcb629c161da50b6b3506..ff96bee57bfc56111a2ba2d284f0f5a76bc10384 100644 (file)
@@ -104,13 +104,6 @@ ifdef CONFIG_PPC64
 CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
 endif
 
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := -mgeneral-regs-only
-endif
-
 AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
@@ -125,13 +118,6 @@ ifdef CONFIG_PPC64
 CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
 endif
 
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := -mgeneral-regs-only
-endif
-
 AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN30)
@@ -146,13 +132,6 @@ ifdef CONFIG_PPC64
 CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
 endif
 
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := -mgeneral-regs-only
-endif
-
 AMD_DAL_CLK_MGR_DCN301 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn301/,$(CLK_MGR_DCN301))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN301)
index 9e1071b2181ffa3439521a4fdb3c252d049e67e5..f4a2088ab17928ee2a7c14150f7b4ddab2e0a843 100644 (file)
@@ -2487,9 +2487,14 @@ enum dc_status dc_link_validate_mode_timing(
 static struct abm *get_abm_from_stream_res(const struct dc_link *link)
 {
        int i;
-       struct dc *dc = link->ctx->dc;
+       struct dc *dc = NULL;
        struct abm *abm = NULL;
 
+       if (!link || !link->ctx)
+               return NULL;
+
+       dc = link->ctx->dc;
+
        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
                struct dc_stream_state *stream = pipe_ctx.stream;
index 2fc12239b22cb72f4c81fbf83b83969b4e6efed0..1bd1a0935290673535333991f7fa7f7cb37798d6 100644 (file)
@@ -3992,7 +3992,7 @@ bool dc_link_dp_set_test_pattern(
        unsigned int cust_pattern_size)
 {
        struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
-       struct pipe_ctx *pipe_ctx = &pipes[0];
+       struct pipe_ctx *pipe_ctx = NULL;
        unsigned int lane;
        unsigned int i;
        unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
@@ -4002,12 +4002,18 @@ bool dc_link_dp_set_test_pattern(
        memset(&training_pattern, 0, sizeof(training_pattern));
 
        for (i = 0; i < MAX_PIPES; i++) {
+               if (pipes[i].stream == NULL)
+                       continue;
+
                if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
                        pipe_ctx = &pipes[i];
                        break;
                }
        }
 
+       if (pipe_ctx == NULL)
+               return false;
+
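
The rewritten lookup replaces the old "take pipes[0]" default with a find-or-fail scan: slots without a stream are skipped instead of dereferenced, and the function now returns false when no top-level pipe drives the link. The same shape in miniature, with simplified stand-in structs:

    #include <stdbool.h>
    #include <stddef.h>

    #define N_PIPES 6                       /* illustrative pipe count */

    struct stream { int link_id; };
    struct pipe   { struct stream *stream; bool top_level; };

    /* First top-level pipe bound to link_id, or NULL when none matches. */
    static struct pipe *find_pipe_for_link(struct pipe *pipes, int link_id)
    {
            for (int i = 0; i < N_PIPES; i++) {
                    if (!pipes[i].stream)   /* unused slot: skip, don't deref */
                            continue;
                    if (pipes[i].top_level && pipes[i].stream->link_id == link_id)
                            return &pipes[i];
            }
            return NULL;                    /* caller must handle not-found */
    }
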
        /* Reset CRTC Test Pattern if it is currently running and request is VideoMode */
        if (link->test_pattern_enabled && test_pattern ==
                        DP_TEST_PATTERN_VIDEO_MODE) {
index 733e6e6e43bd65529162f1201cd434591ebbb493..62ad1a11bff9c5623300936bf8f5742f6c814ceb 100644 (file)
@@ -31,11 +31,4 @@ DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
 
 AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
 
-# fix:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# aarch64 does not support soft-float, so use hard-float and handle this in code
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn10/dcn10_resource.o := -mgeneral-regs-only
-endif
-
 AMD_DISPLAY_FILES += $(AMD_DAL_DCN10)
index 100ce0e28fd5ab40c85caccf96e86a4f95bc00bf..b096011acb490fff2016c154c2ccd20f723b0479 100644 (file)
@@ -470,7 +470,7 @@ void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
 unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id)
 {
        struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
-       uint32_t val = 0;
+       uint32_t val = 0xf;
 
        if (opp_id < MAX_OPP && REG(MUX[opp_id]))
                REG_GET(MUX[opp_id], MPC_OUT_MUX, &val);
index bdc37831535e43a7fdff48a7f635e925635bba63..90e912fef2b36c069142e570bea412bb151486f4 100644 (file)
@@ -608,8 +608,8 @@ static const struct dc_debug_options debug_defaults_drv = {
                .disable_pplib_clock_request = false,
                .disable_pplib_wm_range = false,
                .pplib_wm_report_mode = WM_REPORT_DEFAULT,
-               .pipe_split_policy = MPC_SPLIT_DYNAMIC,
-               .force_single_disp_pipe_split = true,
+               .pipe_split_policy = MPC_SPLIT_AVOID,
+               .force_single_disp_pipe_split = false,
                .disable_dcc = DCC_ENABLE,
                .voltage_align_fclk = true,
                .disable_stereo_support = true,
@@ -1534,15 +1534,8 @@ static bool dcn10_resource_construct(
        memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
        memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
 
-#if defined(CONFIG_ARM64)
-       /* Aarch64 does not support -msoft-float/-mfloat-abi=soft */
-       DC_FP_START();
-       dcn10_resource_construct_fp(dc);
-       DC_FP_END();
-#else
        /* Other architectures we build for, build this with soft-float */
        dcn10_resource_construct_fp(dc);
-#endif
 
        pool->base.pp_smu = dcn10_pp_smu_create(ctx);
 
index 624cb1341ef1450de587f1bdf849e45c1aaa7eca..5fcaf78334ff9a96a24948422733a310bbe08fd2 100644 (file)
@@ -17,10 +17,6 @@ ifdef CONFIG_PPC64
 CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec
 endif
 
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mgeneral-regs-only
-endif
-
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
index 1ee5fc03b7b3dcab7ae7f72e1fda9bd57523784b..bb8c9514108222090c22b2bc8543f3709bf8b505 100644 (file)
@@ -13,10 +13,6 @@ ifdef CONFIG_PPC64
 CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec
 endif
 
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mgeneral-regs-only
-endif
-
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
index 248c2711aacef57e249dd3aa7dd8ef6907210cc2..c20331eb62e01e6f66cc9e1eeda2c6022c3b807f 100644 (file)
@@ -41,11 +41,6 @@ CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -maltivec
 CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -maltivec
 endif
 
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mgeneral-regs-only
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mgeneral-regs-only
-endif
-
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
index 2fd5d34e4ba6f874719ab528c0d24e8d22eabdce..3ca7d911d25c440c5b8862660b25a0749355c718 100644 (file)
@@ -21,10 +21,6 @@ ifdef CONFIG_PPC64
 CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -maltivec
 endif
 
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mgeneral-regs-only
-endif
-
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
index 4825c5c1c6ed686217ec3f9850a75fd080091bdc..35f5bf08ae96e2570d22282da46f1f499236d4a4 100644 (file)
@@ -1731,6 +1731,7 @@ static struct resource_funcs dcn301_res_pool_funcs = {
        .populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
        .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
        .add_stream_to_ctx = dcn30_add_stream_to_ctx,
+       .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
        .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
        .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
        .set_mcif_arb_params = dcn30_set_mcif_arb_params,
index 36e44e1b07faf91e1dcbb72c1935a5c87cea28d5..8d4924b7dc2216312f5b82be65b4133e705df30a 100644 (file)
@@ -20,10 +20,6 @@ ifdef CONFIG_PPC64
 CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -maltivec
 endif
 
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mgeneral-regs-only
-endif
-
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
index a02a33dcd70bd49549994465cd2fc34492fccb56..6bb7f2905821b61c51385286703d4083509f73bd 100644 (file)
@@ -33,10 +33,6 @@ ifdef CONFIG_PPC64
 dml_ccflags := -mhard-float -maltivec
 endif
 
-ifdef CONFIG_ARM64
-dml_rcflags := -mgeneral-regs-only
-endif
-
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
index 860e72a51534cc014d9baccf227aeccc1979a4a9..80170f9721ce949330d72408b81c48575af13101 100644 (file)
@@ -2635,14 +2635,15 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
        }
 
        if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
-                       mode_lib->vba.MinActiveDRAMClockChangeMargin > 60 &&
-                       mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+                       mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
                mode_lib->vba.DRAMClockChangeWatermark += 25;
 
                for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
-                       if (mode_lib->vba.DRAMClockChangeWatermark >
-                       dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
-                               mode_lib->vba.MinTTUVBlank[k] += 25;
+                       if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+                               if (mode_lib->vba.DRAMClockChangeWatermark >
+                               dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+                                       mode_lib->vba.MinTTUVBlank[k] += 25;
+                       }
                }
 
                mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
index f2624a1156e5c8cea21ede1e8694d9c20ded3783..8d31eb75c6a6e803f7bedcb840b693b991e120f0 100644 (file)
@@ -10,10 +10,6 @@ ifdef CONFIG_PPC64
 dsc_ccflags := -mhard-float -maltivec
 endif
 
-ifdef CONFIG_ARM64
-dsc_rcflags := -mgeneral-regs-only
-endif
-
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
index 95cb56929e79e76fc2a6320a8f5b0477d27e8fa1..126c2f3a4dd3b289b4ab871d8af5a8b48c42e70b 100644 (file)
 #include <asm/fpu/api.h>
 #define DC_FP_START() kernel_fpu_begin()
 #define DC_FP_END() kernel_fpu_end()
-#elif defined(CONFIG_ARM64)
-#include <asm/neon.h>
-#define DC_FP_START() kernel_neon_begin()
-#define DC_FP_END() kernel_neon_end()
 #elif defined(CONFIG_PPC64)
 #include <asm/switch_to.h>
 #include <asm/cputable.h>
index e57e64bbacdc2a3ea517c13eb9a7974b685cc023..88322781e447b5812896890b1a776fbae4010092 100644 (file)
@@ -251,7 +251,7 @@ static int smu10_set_hard_min_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cl
                smu10_data->gfx_actual_soft_min_freq = clock;
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetHardMinGfxClk,
-                                       smu10_data->gfx_actual_soft_min_freq,
+                                       clock,
                                        NULL);
        }
        return 0;
@@ -558,7 +558,8 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 
        /* enable the pp_od_clk_voltage sysfs file */
        hwmgr->od_enabled = 1;
-
+       /* disable fine grain tuning by default */
+       data->fine_grain_enabled = 0;
        return result;
 }
 
@@ -597,6 +598,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
        uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
        uint32_t index_fclk = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
        uint32_t index_socclk = data->clock_vol_info.vdd_dep_on_socclk->count - 1;
+       uint32_t fine_grain_min_freq = 0, fine_grain_max_freq = 0;
 
        if (hwmgr->smu_version < 0x1E3700) {
                pr_info("smu firmware version too old, can not set dpm level\n");
@@ -613,6 +615,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
        switch (level) {
        case AMD_DPM_FORCED_LEVEL_HIGH:
        case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+               data->fine_grain_enabled = 0;
+
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+               data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+               data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
                                                data->gfx_max_freq_limit/100,
@@ -648,6 +658,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                                NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+               data->fine_grain_enabled = 0;
+
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+               data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+               data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
                                                min_sclk,
@@ -658,6 +676,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                                NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+               data->fine_grain_enabled = 0;
+
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+               data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+               data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinFclkByFreq,
                                                min_mclk,
@@ -668,6 +694,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                                NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+               data->fine_grain_enabled = 0;
+
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+               data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+               data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
                                                SMU10_UMD_PSTATE_GFXCLK,
@@ -703,6 +737,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                                NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_AUTO:
+               data->fine_grain_enabled = 0;
+
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+               data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+               data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
                                                min_sclk,
@@ -741,6 +783,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                                NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_LOW:
+               data->fine_grain_enabled = 0;
+
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+               data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+               data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
                                                data->gfx_min_freq_limit/100,
@@ -759,6 +809,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                                NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_MANUAL:
+               data->fine_grain_enabled = 1;
+               fallthrough;
        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
        default:
                break;
@@ -948,6 +999,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
        struct smu10_voltage_dependency_table *mclk_table =
                        data->clock_vol_info.vdd_dep_on_fclk;
        uint32_t i, now, size = 0;
+       uint32_t min_freq, max_freq = 0;
+       uint32_t ret = 0;
 
        switch (type) {
        case PP_SCLK:
@@ -983,18 +1036,28 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
                break;
        case OD_SCLK:
                if (hwmgr->od_enabled) {
-                       size = sprintf(buf, "%s:\n", "OD_SCLK");
+                       ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+                       if (ret)
+                               return ret;
+                       ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+                       if (ret)
+                               return ret;
 
+                       size = sprintf(buf, "%s:\n", "OD_SCLK");
                        size += sprintf(buf + size, "0: %10uMhz\n",
-                       (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : data->gfx_min_freq_limit/100);
-                       size += sprintf(buf + size, "1: %10uMhz\n", data->gfx_max_freq_limit/100);
+                       (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
+                       size += sprintf(buf + size, "1: %10uMhz\n",
+                       (data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
                }
                break;
        case OD_RANGE:
                if (hwmgr->od_enabled) {
-                       uint32_t min_freq, max_freq = 0;
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+                       ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+                       if (ret)
+                               return ret;
+                       ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+                       if (ret)
+                               return ret;
 
                        size = sprintf(buf, "%s:\n", "OD_RANGE");
                        size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
@@ -1414,23 +1477,96 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
                                        enum PP_OD_DPM_TABLE_COMMAND type,
                                        long *input, uint32_t size)
 {
+       uint32_t min_freq, max_freq = 0;
+       struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+       int ret = 0;
+
        if (!hwmgr->od_enabled) {
                pr_err("Fine grain not support\n");
                return -EINVAL;
        }
 
-       if (size != 2) {
-               pr_err("Input parameter number not correct\n");
+       if (!smu10_data->fine_grain_enabled) {
+               pr_err("Fine grain tuning not started\n");
                return -EINVAL;
        }
 
        if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
-               if (input[0] == 0)
-                       smu10_set_hard_min_gfxclk_by_freq(hwmgr, input[1]);
-               else if (input[0] == 1)
-                       smu10_set_soft_max_gfxclk_by_freq(hwmgr, input[1]);
-               else
+               if (size != 2) {
+                       pr_err("Input parameter number not correct\n");
                        return -EINVAL;
+               }
+
+               if (input[0] == 0) {
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+                       if (input[1] < min_freq) {
+                               pr_err("Requested fine-grain minimum sclk (%ld) MHz is below the minimum allowed (%u) MHz\n",
+                                       input[1], min_freq);
+                               return -EINVAL;
+                       }
+                       smu10_data->gfx_actual_soft_min_freq = input[1];
+               } else if (input[0] == 1) {
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+                       if (input[1] > max_freq) {
+                               pr_err("Requested fine-grain maximum sclk (%ld) MHz is above the maximum allowed (%u) MHz\n",
+                                       input[1], max_freq);
+                               return -EINVAL;
+                       }
+                       smu10_data->gfx_actual_soft_max_freq = input[1];
+               } else {
+                       return -EINVAL;
+               }
+       } else if (type == PP_OD_RESTORE_DEFAULT_TABLE) {
+               if (size != 0) {
+                       pr_err("Input parameter number not correct\n");
+                       return -EINVAL;
+               }
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+
+               smu10_data->gfx_actual_soft_min_freq = min_freq;
+               smu10_data->gfx_actual_soft_max_freq = max_freq;
+
+               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_SetHardMinGfxClk,
+                                       min_freq,
+                                       NULL);
+               if (ret)
+                       return ret;
+
+               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_SetSoftMaxGfxClk,
+                                       max_freq,
+                                       NULL);
+               if (ret)
+                       return ret;
+       } else if (type == PP_OD_COMMIT_DPM_TABLE) {
+               if (size != 0) {
+                       pr_err("Input parameter number not correct\n");
+                       return -EINVAL;
+               }
+
+               if (smu10_data->gfx_actual_soft_min_freq > smu10_data->gfx_actual_soft_max_freq) {
+                       pr_err("The requested minimum sclk (%u) MHz is greater than the requested maximum sclk (%u) MHz\n",
+                                       smu10_data->gfx_actual_soft_min_freq, smu10_data->gfx_actual_soft_max_freq);
+                       return -EINVAL;
+               }
+
+               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_SetHardMinGfxClk,
+                                       smu10_data->gfx_actual_soft_min_freq,
+                                       NULL);
+               if (ret)
+                       return ret;
+
+               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_SetSoftMaxGfxClk,
+                                       smu10_data->gfx_actual_soft_max_freq,
+                                       NULL);
+               if (ret)
+                       return ret;
+       } else {
+               return -EINVAL;
        }
 
        return 0;
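
Taken together, the handler now implements the usual three-phase overdrive protocol: EDIT range-checks a value against the SMU-reported limits and stages it, RESTORE_DEFAULT resets the staging to those limits and reprograms the firmware, and COMMIT rejects an inverted range before pushing the staged pair. A condensed sketch of that staging/commit shape (illustrative types, not the driver's):

    struct od_limits { unsigned int lo, hi; };      /* SMU-reported range */
    struct od_stage  { unsigned int min, max; };    /* user-staged values */

    static int od_edit(struct od_stage *s, const struct od_limits *l,
                       int idx, unsigned int val)
    {
            if (idx == 0 && val >= l->lo) { s->min = val; return 0; }
            if (idx == 1 && val <= l->hi) { s->max = val; return 0; }
            return -1;                              /* bad index or out of range */
    }

    static int od_commit(const struct od_stage *s)
    {
            if (s->min > s->max)
                    return -1;                      /* reject an inverted range */
            /* push s->min as the hard minimum and s->max as the soft maximum */
            return 0;
    }
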
index 6c9b5f060902b864593d13ac1ac48f96808af403..808e0ecbe1f096814a9057c88ef8004442db164c 100644 (file)
@@ -283,6 +283,7 @@ struct smu10_hwmgr {
        uint32_t                        vclk_soft_min;
        uint32_t                        dclk_soft_min;
        uint32_t                        gfx_actual_soft_min_freq;
+       uint32_t                        gfx_actual_soft_max_freq;
        uint32_t                        gfx_min_freq_limit;
        uint32_t                        gfx_max_freq_limit; /* in 10 kHz */
 
@@ -299,6 +300,8 @@ struct smu10_hwmgr {
        bool need_min_deep_sleep_dcefclk;
        uint32_t                             deep_sleep_dcefclk;
        uint32_t                             num_active_display;
+
+       bool                                                    fine_grain_enabled;
 };
 
 struct pp_hwmgr;
index 9608745d732fb687055a9845b49a9f2a88be86a6..12b36eb0ff6a52b09d5d05349adf21c8d80d3607 100644 (file)
@@ -2372,7 +2372,7 @@ static void sienna_cichlid_fill_i2c_req(SwI2cRequest_t  *req, bool write,
 {
        int i;
 
-       req->I2CcontrollerPort = 0;
+       req->I2CcontrollerPort = 1;
        req->I2CSpeed = 2;
        req->SlaveAddress = address;
        req->NumCmds = numbytes;
index 8cb4fcee9a2c3750143775c940435fb4ba947a65..5c1482d4ca43e3af916596de68571092c25ea276 100644 (file)
@@ -252,7 +252,8 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
                *value = metrics->UvdActivity;
                break;
        case METRICS_AVERAGE_SOCKETPOWER:
-               *value = metrics->CurrentSocketPower;
+               *value = (metrics->CurrentSocketPower << 8) / 1000;
                break;
        case METRICS_TEMPERATURE_EDGE:
                *value = metrics->GfxTemperature / 100 *
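
The Vangogh socket-power change above converts the SMU metric into 8.8 fixed-point watts, assuming (as on related APUs) that CurrentSocketPower is reported in milliwatts. A worked instance:

    12500 mW  ->  (12500 << 8) / 1000  =  3200  =  0x0c80  ->  3200 / 256  =  12.5 W
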
index dc75db8af3715284eabf78aa3dc04bac6c1b5343..f743685a20e8a70bf71d0b7885b4da46ec2dda78 100644 (file)
@@ -188,6 +188,7 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
                        return -EINVAL;
                *freq = clk_table->SocClocks[dpm_level].Freq;
                break;
+       case SMU_UCLK:
        case SMU_MCLK:
                if (dpm_level >= NUM_FCLK_DPM_LEVELS)
                        return -EINVAL;
index 522d5500465557cc9d911177b00e5c7e4eb28f48..06abf2a7ce9e9e0f7f116f5c36223f5264d838b3 100644 (file)
@@ -225,6 +225,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
        break;
        case SMU_FCLK:
        case SMU_MCLK:
+       case SMU_UCLK:
                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
                if (ret)
                        return ret;
index e6231947f98723a7720504ec61ad00adf4ec08b0..a0cb746bcb0a9a1344e0cd23fa3dcf7a228a9f3b 100644 (file)
@@ -1163,7 +1163,14 @@ retry:
        if (ret)
                goto out;
 
-       if (old_fb->format != fb->format) {
+       /*
+        * Only check the FOURCC format code, excluding modifiers. This is
+        * enough for all legacy drivers. Atomic drivers have their own
+        * checks in their ->atomic_check implementation, which will
+        * return -EINVAL if any hw or driver constraint is violated due
+        * to modifier changes.
+        */
+       if (old_fb->format->format != fb->format->format) {
                DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
                ret = -EINVAL;
                goto out;
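
The comparison now checks the FOURCC codes rather than the drm_format_info pointers: two framebuffers with the same pixel format can carry distinct format-info pointers when a driver supplies modifier-specific info, so pointer equality was rejecting legitimate flips. A trimmed-down illustration (stand-in structs, not the full DRM types):

    #include <stdbool.h>
    #include <stdint.h>

    struct format_info { uint32_t format; /* FOURCC code */ };
    struct framebuffer { const struct format_info *format; };

    /* Legacy page flip: only the pixel format must match; the info pointers
     * may differ even for the same FOURCC, so don't compare them. */
    static bool flip_format_ok(const struct framebuffer *old_fb,
                               const struct framebuffer *new_fb)
    {
            return old_fb->format->format == new_fb->format->format;
    }
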
index e5574e506a5ccf03684f47fc0f59d9a62f4ae1d8..6d9e81ea67f4b742d52afaf420a8ccfc9a4ef329 100644 (file)
@@ -38,6 +38,7 @@ i915-y += i915_drv.o \
          i915_config.o \
          i915_irq.o \
          i915_getparam.o \
+         i915_mitigations.o \
          i915_params.o \
          i915_pci.o \
          i915_scatterlist.o \
index a9439b4156037169b17361c2ea7a446f7d45b573..b3533a32f8ba2e4d015532123111c7ee989381a8 100644 (file)
@@ -1616,10 +1616,6 @@ static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
 
        get_dsi_io_power_domains(i915,
                                 enc_to_intel_dsi(encoder));
-
-       if (crtc_state->dsc.compression_enable)
-               intel_display_power_get(i915,
-                                       intel_dsc_power_domain(crtc_state));
 }
 
 static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
index ce82d654d0f24056bb624efdab8c456add83c1c8..34d78c654df3b72dab3b208086ddbbe1cd8984be 100644 (file)
@@ -1436,6 +1436,9 @@ struct intel_dp {
                bool ycbcr_444_to_420;
        } dfp;
 
+       /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+       struct pm_qos_request pm_qos;
+
        /* Display stream compression testing */
        bool force_dsc_en;
 
index 2165398d2c7cd40af4d52153ab9e1999829b47b6..37f1a10fd02172e79f443c32471e74a9b407cf88 100644 (file)
@@ -1489,7 +1489,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
         * lowest possible wakeup latency and so prevent the cpu from going into
         * deep sleep states.
         */
-       cpu_latency_qos_update_request(&i915->pm_qos, 0);
+       cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
 
        intel_dp_check_edp(intel_dp);
 
@@ -1622,7 +1622,7 @@ done:
 
        ret = recv_bytes;
 out:
-       cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
+       cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
 
        if (vdd)
                edp_panel_vdd_off(intel_dp, false);
@@ -1898,6 +1898,9 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
 static void
 intel_dp_aux_fini(struct intel_dp *intel_dp)
 {
+       if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
+               cpu_latency_qos_remove_request(&intel_dp->pm_qos);
+
        kfree(intel_dp->aux.name);
 }
 
@@ -1950,6 +1953,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
                                               encoder->base.name);
 
        intel_dp->aux.transfer = intel_dp_aux_transfer;
+       cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
 }
 
 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
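
With the QoS request moved from the device-wide i915 struct into each intel_dp, an AUX transfer only pins the CPU wakeup latency while that encoder is transferring, and intel_dp_aux_fini() drops the request if it is still registered. The per-transfer lifecycle, sketched against the kernel's cpu_latency_qos API:

    #include <linux/pm_qos.h>

    /* The request is registered once at init with
     * cpu_latency_qos_add_request(req, PM_QOS_DEFAULT_VALUE) and torn down
     * with cpu_latency_qos_remove_request(req). */
    static void latency_sensitive_xfer(struct pm_qos_request *req)
    {
            cpu_latency_qos_update_request(req, 0);  /* block deep C-states */
            /* ... irq-driven AUX transfer runs here ... */
            cpu_latency_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
    }
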
index 9f23bac0d7924d3857e1e0e8fdc202f1341dc5b5..d64fce1a17cbc5668f756853a769244554d9504d 100644 (file)
@@ -1650,16 +1650,13 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
                val = pch_get_backlight(connector);
        else
                val = lpt_get_backlight(connector);
-       val = intel_panel_compute_brightness(connector, val);
-       panel->backlight.level = clamp(val, panel->backlight.min,
-                                      panel->backlight.max);
 
        if (cpu_mode) {
                drm_dbg_kms(&dev_priv->drm,
                            "CPU backlight register was enabled, switching to PCH override\n");
 
                /* Write converted CPU PWM value to PCH override register */
-               lpt_set_backlight(connector->base.state, panel->backlight.level);
+               lpt_set_backlight(connector->base.state, val);
                intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
                               pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
 
@@ -1667,6 +1664,10 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
                               cpu_ctl2 & ~BLM_PWM_ENABLE);
        }
 
+       val = intel_panel_compute_brightness(connector, val);
+       panel->backlight.level = clamp(val, panel->backlight.min,
+                                      panel->backlight.max);
+
        return 0;
 }
 
index d52f9c1779081e02afa7d843926938f04fc25912..f94025ec603a6d132b0024b05afad604834fad02 100644 (file)
@@ -812,10 +812,20 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
                intel_dsi_prepare(encoder, pipe_config);
 
        intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
-       intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
 
-       /* Deassert reset */
-       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+       /*
+        * Give the panel time to power-on and then deassert its reset.
+        * Depending on the VBT MIPI sequences version the deassert-seq
+        * may contain the necessary delay, intel_dsi_msleep() will skip
+        * the delay in that case. If there is no deassert-seq, then an
+        * unconditional msleep is used to give the panel time to power-on.
+        */
+       if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
+               intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+               intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+       } else {
+               msleep(intel_dsi->panel_on_delay);
+       }
 
        if (IS_GEMINILAKE(dev_priv)) {
                glk_cold_boot = glk_dsi_enable_io(encoder);
index bcc80f428172b623f5d2905824a175230db0fcaa..bd3046e5a934801c399a8e77055b17fa5a937afe 100644 (file)
@@ -1046,7 +1046,7 @@ static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cach
        GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
        cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
 
-       __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
+       i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);
 
        intel_gt_chipset_flush(cache->rq->engine->gt);
@@ -1296,6 +1296,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
                goto err_pool;
        }
 
+       /* zero-fill: unwritten words decode as MI_NOOP (0), not stale data */
+       memset32(cmd, 0, pool->obj->base.size / sizeof(u32));
+
        batch = i915_vma_instance(pool->obj, vma->vm, NULL);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
index d93d85cd30270ba371ac877632b1d2261dcdc8e5..94465374ca2fe5505c67804bf697d5b88f9c102f 100644 (file)
@@ -7,8 +7,6 @@
 #include "i915_drv.h"
 #include "intel_gpu_commands.h"
 
-#define MAX_URB_ENTRIES 64
-#define STATE_SIZE (4 * 1024)
 #define GT3_INLINE_DATA_DELAYS 0x1E00
 #define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
 
@@ -34,38 +32,59 @@ struct batch_chunk {
 };
 
 struct batch_vals {
-       u32 max_primitives;
-       u32 max_urb_entries;
-       u32 cmd_size;
-       u32 state_size;
+       u32 max_threads;
        u32 state_start;
-       u32 batch_size;
+       u32 surface_start;
        u32 surface_height;
        u32 surface_width;
-       u32 scratch_size;
-       u32 max_size;
+       u32 size;
 };
 
+static inline int num_primitives(const struct batch_vals *bv)
+{
+       /*
+        * We need to saturate the GPU with work in order to dispatch
+        * a shader on every HW thread, and clear the thread-local registers.
+        * In short, we have to dispatch work faster than the shaders can
+        * run in order to fill the EU and occupy each HW thread.
+        */
+       return bv->max_threads;
+}
+
 static void
 batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
 {
        if (IS_HASWELL(i915)) {
-               bv->max_primitives = 280;
-               bv->max_urb_entries = MAX_URB_ENTRIES;
+               switch (INTEL_INFO(i915)->gt) {
+               default:
+               case 1:
+                       bv->max_threads = 70;
+                       break;
+               case 2:
+                       bv->max_threads = 140;
+                       break;
+               case 3:
+                       bv->max_threads = 280;
+                       break;
+               }
                bv->surface_height = 16 * 16;
                bv->surface_width = 32 * 2 * 16;
        } else {
-               bv->max_primitives = 128;
-               bv->max_urb_entries = MAX_URB_ENTRIES / 2;
+               switch (INTEL_INFO(i915)->gt) {
+               default:
+               case 1: /* including vlv */
+                       bv->max_threads = 36;
+                       break;
+               case 2:
+                       bv->max_threads = 128;
+                       break;
+               }
                bv->surface_height = 16 * 8;
                bv->surface_width = 32 * 16;
        }
-       bv->cmd_size = bv->max_primitives * 4096;
-       bv->state_size = STATE_SIZE;
-       bv->state_start = bv->cmd_size;
-       bv->batch_size = bv->cmd_size + bv->state_size;
-       bv->scratch_size = bv->surface_height * bv->surface_width;
-       bv->max_size = bv->batch_size + bv->scratch_size;
+       bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K);
+       bv->surface_start = bv->state_start + SZ_4K;
+       bv->size = bv->surface_start + bv->surface_height * bv->surface_width;
 }
 
 static void batch_init(struct batch_chunk *bc,
@@ -155,7 +174,8 @@ static u32
 gen7_fill_binding_table(struct batch_chunk *state,
                        const struct batch_vals *bv)
 {
-       u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv);
+       u32 surface_start =
+               gen7_fill_surface_state(state, bv->surface_start, bv);
        u32 *cs = batch_alloc_items(state, 32, 8);
        u32 offset = batch_offset(state, cs);
 
@@ -214,9 +234,9 @@ static void
 gen7_emit_state_base_address(struct batch_chunk *batch,
                             u32 surface_state_base)
 {
-       u32 *cs = batch_alloc_items(batch, 0, 12);
+       u32 *cs = batch_alloc_items(batch, 0, 10);
 
-       *cs++ = STATE_BASE_ADDRESS | (12 - 2);
+       *cs++ = STATE_BASE_ADDRESS | (10 - 2);
        /* general */
        *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
        /* surface */
@@ -233,8 +253,6 @@ gen7_emit_state_base_address(struct batch_chunk *batch,
        *cs++ = BASE_ADDRESS_MODIFY;
        *cs++ = 0;
        *cs++ = BASE_ADDRESS_MODIFY;
-       *cs++ = 0;
-       *cs++ = 0;
        batch_advance(batch, cs);
 }
 
@@ -244,8 +262,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
                    u32 urb_size, u32 curbe_size,
                    u32 mode)
 {
-       u32 urb_entries = bv->max_urb_entries;
-       u32 threads = bv->max_primitives - 1;
+       u32 threads = bv->max_threads - 1;
        u32 *cs = batch_alloc_items(batch, 32, 8);
 
        *cs++ = MEDIA_VFE_STATE | (8 - 2);
@@ -254,7 +271,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
        *cs++ = 0;
 
        /* number of threads & urb entries for GPGPU vs Media Mode */
-       *cs++ = threads << 16 | urb_entries << 8 | mode << 2;
+       *cs++ = threads << 16 | 1 << 8 | mode << 2;
 
        *cs++ = 0;
 
@@ -293,17 +310,12 @@ gen7_emit_media_object(struct batch_chunk *batch,
 {
        unsigned int x_offset = (media_object_index % 16) * 64;
        unsigned int y_offset = (media_object_index / 16) * 16;
-       unsigned int inline_data_size;
-       unsigned int media_batch_size;
-       unsigned int i;
+       unsigned int pkt = 6 + 3;
        u32 *cs;
 
-       inline_data_size = 112 * 8;
-       media_batch_size = inline_data_size + 6;
-
-       cs = batch_alloc_items(batch, 8, media_batch_size);
+       cs = batch_alloc_items(batch, 8, pkt);
 
-       *cs++ = MEDIA_OBJECT | (media_batch_size - 2);
+       *cs++ = MEDIA_OBJECT | (pkt - 2);
 
        /* interface descriptor offset */
        *cs++ = 0;
@@ -317,25 +329,44 @@ gen7_emit_media_object(struct batch_chunk *batch,
        *cs++ = 0;
 
        /* inline */
-       *cs++ = (y_offset << 16) | (x_offset);
+       *cs++ = y_offset << 16 | x_offset;
        *cs++ = 0;
        *cs++ = GT3_INLINE_DATA_DELAYS;
-       for (i = 3; i < inline_data_size; i++)
-               *cs++ = 0;
 
        batch_advance(batch, cs);
 }
 
 static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
 {
-       u32 *cs = batch_alloc_items(batch, 0, 5);
+       u32 *cs = batch_alloc_items(batch, 0, 4);
 
-       *cs++ = GFX_OP_PIPE_CONTROL(5);
-       *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE |
-               PIPE_CONTROL_GLOBAL_GTT_IVB;
+       *cs++ = GFX_OP_PIPE_CONTROL(4);
+       *cs++ = PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+               PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+               PIPE_CONTROL_DC_FLUSH_ENABLE |
+               PIPE_CONTROL_CS_STALL;
        *cs++ = 0;
        *cs++ = 0;
+
+       batch_advance(batch, cs);
+}
+
+static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
+{
+       u32 *cs = batch_alloc_items(batch, 0, 8);
+
+       /* ivb: Stall before STATE_CACHE_INVALIDATE */
+       *cs++ = GFX_OP_PIPE_CONTROL(4);
+       *cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
+               PIPE_CONTROL_CS_STALL;
+       *cs++ = 0;
+       *cs++ = 0;
+
+       *cs++ = GFX_OP_PIPE_CONTROL(4);
+       *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
        *cs++ = 0;
+       *cs++ = 0;
+
        batch_advance(batch, cs);
 }
 
@@ -344,34 +375,34 @@ static void emit_batch(struct i915_vma * const vma,
                       const struct batch_vals *bv)
 {
        struct drm_i915_private *i915 = vma->vm->i915;
-       unsigned int desc_count = 64;
-       const u32 urb_size = 112;
+       const unsigned int desc_count = 1;
+       const unsigned int urb_size = 1;
        struct batch_chunk cmds, state;
-       u32 interface_descriptor;
+       u32 descriptors;
        unsigned int i;
 
-       batch_init(&cmds, vma, start, 0, bv->cmd_size);
-       batch_init(&state, vma, start, bv->state_start, bv->state_size);
+       batch_init(&cmds, vma, start, 0, bv->state_start);
+       batch_init(&state, vma, start, bv->state_start, SZ_4K);
 
-       interface_descriptor =
-               gen7_fill_interface_descriptor(&state, bv,
-                                              IS_HASWELL(i915) ?
-                                              &cb_kernel_hsw :
-                                              &cb_kernel_ivb,
-                                              desc_count);
-       gen7_emit_pipeline_flush(&cmds);
+       descriptors = gen7_fill_interface_descriptor(&state, bv,
+                                                    IS_HASWELL(i915) ?
+                                                    &cb_kernel_hsw :
+                                                    &cb_kernel_ivb,
+                                                    desc_count);
+
+       gen7_emit_pipeline_invalidate(&cmds);
        batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
        batch_add(&cmds, MI_NOOP);
-       gen7_emit_state_base_address(&cmds, interface_descriptor);
+       gen7_emit_pipeline_invalidate(&cmds);
+
        gen7_emit_pipeline_flush(&cmds);
+       gen7_emit_state_base_address(&cmds, descriptors);
+       gen7_emit_pipeline_invalidate(&cmds);
 
        gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
+       gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
 
-       gen7_emit_interface_descriptor_load(&cmds,
-                                           interface_descriptor,
-                                           desc_count);
-
-       for (i = 0; i < bv->max_primitives; i++)
+       for (i = 0; i < num_primitives(bv); i++)
                gen7_emit_media_object(&cmds, i);
 
        batch_add(&cmds, MI_BATCH_BUFFER_END);
@@ -385,15 +416,15 @@ int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
 
        batch_get_defaults(engine->i915, &bv);
        if (!vma)
-               return bv.max_size;
+               return bv.size;
 
-       GEM_BUG_ON(vma->obj->base.size < bv.max_size);
+       GEM_BUG_ON(vma->obj->base.size < bv.size);
 
        batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
        if (IS_ERR(batch))
                return PTR_ERR(batch);
 
-       emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
+       emit_batch(vma, memset(batch, 0, bv.size), &bv);
 
        i915_gem_object_flush_map(vma->obj);
        __i915_gem_object_release_map(vma->obj);
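
Worked numbers for the new layout above, using the Haswell GT3 values from batch_get_defaults(): the command stream occupies the first pages, a single 4K state page follows at state_start, and the scratch surface starts one page after that. A self-contained check of the arithmetic (ROUND_UP/SZ_* are re-derived locally, not the kernel macros):

#include <stdio.h>

#define SZ_1K 1024u
#define SZ_4K 4096u
#define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	/* Haswell GT3 values from batch_get_defaults() above */
	unsigned int max_threads = 280;
	unsigned int surface_h = 16 * 16;        /* 256 */
	unsigned int surface_w = 32 * 2 * 16;    /* 1024 */

	unsigned int state_start = ROUND_UP(SZ_1K + max_threads * 64, SZ_4K);
	unsigned int surface_start = state_start + SZ_4K;
	unsigned int size = surface_start + surface_h * surface_w;

	/* prints: state_start=20480 surface_start=24576 size=286720 */
	printf("state_start=%u surface_start=%u size=%u\n",
	       state_start, surface_start, size);
	return 0;
}
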
index a41b43f445b8ae3fee0f545a7d3a29e5ef10a3c9..ecf3a6118a6d7989a36488672faca28edf37fdc6 100644
@@ -32,6 +32,7 @@
 #include "gen6_ppgtt.h"
 #include "gen7_renderclear.h"
 #include "i915_drv.h"
+#include "i915_mitigations.h"
 #include "intel_breadcrumbs.h"
 #include "intel_context.h"
 #include "intel_gt.h"
@@ -886,7 +887,8 @@ static int switch_context(struct i915_request *rq)
        GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
 
        if (engine->wa_ctx.vma && ce != engine->kernel_context) {
-               if (engine->wa_ctx.vma->private != ce) {
+               if (engine->wa_ctx.vma->private != ce &&
+                   i915_mitigate_clear_residuals()) {
                        ret = clear_residuals(rq);
                        if (ret)
                                return ret;
@@ -1290,7 +1292,7 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
 
        GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
 
-       if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
+       if (IS_GEN(engine->i915, 7) && engine->class == RENDER_CLASS) {
                err = gen7_ctx_switch_bb_init(engine);
                if (err)
                        goto err_ring_unpin;
index 180c23e2e25e4ed3291a995c7590c0646c89d42a..602f1a0bc587145849a85a726026f63eebbcfdbf 100644
@@ -53,6 +53,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
        fw_def(ELKHARTLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl,  9, 0, 0)) \
        fw_def(ICELAKE,     0, guc_def(icl, 49, 0, 1), huc_def(icl,  9, 0, 0)) \
        fw_def(COMETLAKE,   5, guc_def(cml, 49, 0, 1), huc_def(cml,  4, 0, 0)) \
+       fw_def(COMETLAKE,   0, guc_def(kbl, 49, 0, 1), huc_def(kbl,  4, 0, 0)) \
        fw_def(COFFEELAKE,  0, guc_def(kbl, 49, 0, 1), huc_def(kbl,  4, 0, 0)) \
        fw_def(GEMINILAKE,  0, guc_def(glk, 49, 0, 1), huc_def(glk,  4, 0, 0)) \
        fw_def(KABYLAKE,    0, guc_def(kbl, 49, 0, 1), huc_def(kbl,  4, 0, 0)) \
index a15f875396576d0a0e56a79068de2ec75d0758bd..62a5b0dd2003b76ece28cc0a12db1dd178ee8e07 100644
@@ -217,6 +217,15 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                                  DDI_BUF_CTL_ENABLE);
                        vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE;
                }
+               vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+                       ~(PORTA_HOTPLUG_ENABLE | PORTA_HOTPLUG_STATUS_MASK);
+               vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+                       ~(PORTB_HOTPLUG_ENABLE | PORTB_HOTPLUG_STATUS_MASK);
+               vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+                       ~(PORTC_HOTPLUG_ENABLE | PORTC_HOTPLUG_STATUS_MASK);
+               /* No hpd_invert set in vgpu vbt, need to clear invert mask */
+               vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~BXT_DDI_HPD_INVERT_MASK;
+               vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~BXT_DE_PORT_HOTPLUG_MASK;
 
                vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1));
                vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
@@ -273,6 +282,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                        vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)) |=
                                (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
                                 TRANS_DDI_FUNC_ENABLE);
+                       vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+                               PORTA_HOTPLUG_ENABLE;
                        vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
                                GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
                }
@@ -301,6 +312,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                                (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
                                 (PORT_B << TRANS_DDI_PORT_SHIFT) |
                                 TRANS_DDI_FUNC_ENABLE);
+                       vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+                               PORTB_HOTPLUG_ENABLE;
                        vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
                                GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
                }
@@ -329,6 +342,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                                (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
                                 (PORT_B << TRANS_DDI_PORT_SHIFT) |
                                 TRANS_DDI_FUNC_ENABLE);
+                       vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+                               PORTC_HOTPLUG_ENABLE;
                        vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
                                GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
                }
@@ -661,44 +676,62 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
                                PORTD_HOTPLUG_STATUS_MASK;
                intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
        } else if (IS_BROXTON(i915)) {
-               if (connected) {
-                       if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+               if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+                       if (connected) {
                                vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
                                        GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
+                       } else {
+                               vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
+                                       ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
                        }
-                       if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
-                               vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
-                                       SFUSE_STRAP_DDIB_DETECTED;
+                       vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+                               GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
+                       vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+                               ~PORTA_HOTPLUG_STATUS_MASK;
+                       vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+                               PORTA_HOTPLUG_LONG_DETECT;
+                       intel_vgpu_trigger_virtual_event(vgpu, DP_A_HOTPLUG);
+               }
+               if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+                       if (connected) {
                                vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
                                        GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
-                       }
-                       if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
                                vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
-                                       SFUSE_STRAP_DDIC_DETECTED;
-                               vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
-                                       GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
-                       }
-               } else {
-                       if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+                                       SFUSE_STRAP_DDIB_DETECTED;
+                       } else {
                                vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
-                                       ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
-                       }
-                       if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+                                       ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
                                vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
                                        ~SFUSE_STRAP_DDIB_DETECTED;
-                               vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
-                                       ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
                        }
-                       if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
-                               vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
-                                       ~SFUSE_STRAP_DDIC_DETECTED;
+                       vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+                               GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
+                       vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+                               ~PORTB_HOTPLUG_STATUS_MASK;
+                       vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+                               PORTB_HOTPLUG_LONG_DETECT;
+                       intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
+               }
+               if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+                       if (connected) {
+                               vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+                                       GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+                               vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
+                                       SFUSE_STRAP_DDIC_DETECTED;
+                       } else {
                                vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
                                        ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+                               vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
+                                       ~SFUSE_STRAP_DDIC_DETECTED;
                        }
+                       vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+                               GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+                       vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+                               ~PORTC_HOTPLUG_STATUS_MASK;
+                       vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+                               PORTC_HOTPLUG_LONG_DETECT;
+                       intel_vgpu_trigger_virtual_event(vgpu, DP_C_HOTPLUG);
                }
-               vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
-                       PORTB_HOTPLUG_STATUS_MASK;
-               intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
        }
 }
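
The restructure above gives each Broxton port its own plug/unplug path: the ISR bit is set or cleared according to `connected`, the IIR and long-detect status are raised either way, and the matching per-port hotplug event fires, where previously everything funnelled into the PORT_B status and event. A toy model of the new per-port loop (names and printf output are illustrative only):

#include <stdbool.h>
#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, NPORTS };

/* Each populated port now handles both plug and unplug itself and
 * raises its own hotplug event. */
static void emulate_hotplug(const bool monitor[NPORTS], bool connected)
{
	for (int p = PORT_A; p < NPORTS; p++) {
		if (!monitor[p])
			continue;
		/* real code: set/clear the ISR bit, set IIR + long-detect */
		printf("port %c: %s -> DP_%c_HOTPLUG\n",
		       'A' + p, connected ? "plug" : "unplug", 'A' + p);
	}
}

int main(void)
{
	const bool monitor[NPORTS] = { true, true, false };

	emulate_hotplug(monitor, true);
	emulate_hotplug(monitor, false);
	return 0;
}
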
 
index e49944fde3339aafd16962ace9fab7337e0c0f6a..cbe5931906e0a8976aa7ecd4b0d820d05fd6ea42 100644
@@ -437,10 +437,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        if (ret)
                goto out_clean_sched_policy;
 
-       if (IS_BROADWELL(dev_priv))
+       if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
                ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
-       /* FixMe: Re-enable APL/BXT once vfio_edid enabled */
-       else if (!IS_BROXTON(dev_priv))
+       else
                ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
        if (ret)
                goto out_clean_sched_policy;
index 93265951fdbbdbd03d80c8de5c327583a83dedab..b0899b665e85207007de4c7f43f5b805523ac931 100644
@@ -1166,7 +1166,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                }
        }
        if (IS_ERR(src)) {
-               unsigned long x, n;
+               unsigned long x, n, remain;
                void *ptr;
 
                /*
@@ -1177,14 +1177,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                 * We don't care about copying too much here as we only
                 * validate up to the end of the batch.
                 */
+               remain = length;
                if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
-                       length = round_up(length,
+                       remain = round_up(remain,
                                          boot_cpu_data.x86_clflush_size);
 
                ptr = dst;
                x = offset_in_page(offset);
-               for (n = offset >> PAGE_SHIFT; length; n++) {
-                       int len = min(length, PAGE_SIZE - x);
+               for (n = offset >> PAGE_SHIFT; remain; n++) {
+                       int len = min(remain, PAGE_SIZE - x);
 
                        src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
                        if (needs_clflush)
@@ -1193,13 +1194,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                        kunmap_atomic(src);
 
                        ptr += len;
-                       length -= len;
+                       remain -= len;
                        x = 0;
                }
        }
 
        i915_gem_object_unpin_pages(src_obj);
 
+       memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
+
        /* dst_obj is returned with vmap pinned */
        return dst;
 }
@@ -1392,11 +1395,6 @@ static unsigned long *alloc_whitelist(u32 batch_length)
 
 #define LENGTH_BIAS 2
 
-static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
-{
-       return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
-}
-
 /**
  * intel_engine_cmd_parser() - parse a batch buffer for privilege violations
  * @engine: the engine on which the batch is to execute
@@ -1538,16 +1536,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
                                ret = 0; /* allow execution */
                        }
                }
-
-               if (shadow_needs_clflush(shadow->obj))
-                       drm_clflush_virt_range(batch_end, 8);
        }
 
-       if (shadow_needs_clflush(shadow->obj)) {
-               void *ptr = page_mask_bits(shadow->obj->mm.mapping);
-
-               drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
-       }
+       i915_gem_object_flush_map(shadow->obj);
 
        if (!IS_ERR_OR_NULL(jump_whitelist))
                kfree(jump_whitelist);
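
Two coupled changes in this file: copy_batch() keeps the caller's `length` intact (rounding a local `remain` instead) and then zero-pads the shadow up to the full object size, so the shadow never exposes stale contents past the user batch; that in turn lets the parser drop the hand-rolled clflush bookkeeping in favour of a single i915_gem_object_flush_map(). A sketch of the padding step, assuming only libc:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Model of the tail padding: zero from `length` to the object end.
 * 0x00000000 decodes as MI_NOOP, so the pad is inert if executed. */
static void pad_shadow(void *dst, size_t length, size_t obj_size)
{
	memset((char *)dst + length, 0, obj_size - length);
}

int main(void)
{
	uint32_t shadow[1024] = { [0] = 0x18800000 };   /* fake batch word */

	pad_shadow(shadow, 4, sizeof(shadow));
	printf("%#x %#x\n", shadow[0], shadow[1]);   /* batch kept, tail 0 */
	return 0;
}
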
index 320856b665a17b69d465c28920439a7c7c51aefa..99eb0d7bbc447fb7b4c58798ce66e6752189bc18 100644
@@ -578,8 +578,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 
        pci_set_master(pdev);
 
-       cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
-
        intel_gt_init_workarounds(dev_priv);
 
        /* On the 945G/GM, the chipset reports the MSI capability on the
@@ -626,7 +624,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 err_msi:
        if (pdev->msi_enabled)
                pci_disable_msi(pdev);
-       cpu_latency_qos_remove_request(&dev_priv->pm_qos);
 err_mem_regions:
        intel_memory_regions_driver_release(dev_priv);
 err_ggtt:
@@ -648,8 +645,6 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
 
        if (pdev->msi_enabled)
                pci_disable_msi(pdev);
-
-       cpu_latency_qos_remove_request(&dev_priv->pm_qos);
 }
 
 /**
@@ -1052,6 +1047,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
 
 void i915_driver_shutdown(struct drm_i915_private *i915)
 {
+       disable_rpm_wakeref_asserts(&i915->runtime_pm);
+
        i915_gem_suspend(i915);
 
        drm_kms_helper_poll_disable(&i915->drm);
@@ -1065,6 +1062,8 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
 
        intel_suspend_encoders(i915);
        intel_shutdown_encoders(i915);
+
+       enable_rpm_wakeref_asserts(&i915->runtime_pm);
 }
 
 static bool suspend_to_idle(struct drm_i915_private *dev_priv)
index 0a3ee4f9dc0a77d46eda29821a9df04e5a2e7ad1..632c713227dc7011bb0fa1f6017d78e76ce0ac74 100644
@@ -891,9 +891,6 @@ struct drm_i915_private {
 
        bool display_irqs_enabled;
 
-       /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
-       struct pm_qos_request pm_qos;
-
        /* Sideband mailbox protection */
        struct mutex sb_lock;
        struct pm_qos_request sb_qos;
diff --git a/drivers/gpu/drm/i915/i915_mitigations.c b/drivers/gpu/drm/i915/i915_mitigations.c
new file mode 100644
index 0000000..84f1259
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mitigations.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "i915_drv.h"
+#include "i915_mitigations.h"
+
+static unsigned long mitigations __read_mostly = ~0UL;
+
+enum {
+       CLEAR_RESIDUALS = 0,
+};
+
+static const char * const names[] = {
+       [CLEAR_RESIDUALS] = "residuals",
+};
+
+bool i915_mitigate_clear_residuals(void)
+{
+       return READ_ONCE(mitigations) & BIT(CLEAR_RESIDUALS);
+}
+
+static int mitigations_set(const char *val, const struct kernel_param *kp)
+{
+       unsigned long new = ~0UL;
+       char *str, *sep, *tok;
+       bool first = true;
+       int err = 0;
+
+       BUILD_BUG_ON(ARRAY_SIZE(names) >= BITS_PER_TYPE(mitigations));
+
+       str = kstrdup(val, GFP_KERNEL);
+       if (!str)
+               return -ENOMEM;
+
+       for (sep = str; (tok = strsep(&sep, ","));) {
+               bool enable = true;
+               int i;
+
+               /* Be tolerant of leading/trailing whitespace */
+               tok = strim(tok);
+
+               if (first) {
+                       first = false;
+
+                       if (!strcmp(tok, "auto"))
+                               continue;
+
+                       new = 0;
+                       if (!strcmp(tok, "off"))
+                               continue;
+               }
+
+               if (*tok == '!') {
+                       enable = !enable;
+                       tok++;
+               }
+
+               if (!strncmp(tok, "no", 2)) {
+                       enable = !enable;
+                       tok += 2;
+               }
+
+               if (*tok == '\0')
+                       continue;
+
+               for (i = 0; i < ARRAY_SIZE(names); i++) {
+                       if (!strcmp(tok, names[i])) {
+                               if (enable)
+                                       new |= BIT(i);
+                               else
+                                       new &= ~BIT(i);
+                               break;
+                       }
+               }
+               if (i == ARRAY_SIZE(names)) {
+                       pr_err("Bad \"%s.mitigations=%s\", '%s' is unknown\n",
+                              DRIVER_NAME, val, tok);
+                       err = -EINVAL;
+                       break;
+               }
+       }
+       kfree(str);
+       if (err)
+               return err;
+
+       WRITE_ONCE(mitigations, new);
+       return 0;
+}
+
+static int mitigations_get(char *buffer, const struct kernel_param *kp)
+{
+       unsigned long local = READ_ONCE(mitigations);
+       int count, i;
+       bool enable;
+
+       if (!local)
+               return scnprintf(buffer, PAGE_SIZE, "%s\n", "off");
+
+       if (local & BIT(BITS_PER_LONG - 1)) {
+               count = scnprintf(buffer, PAGE_SIZE, "%s,", "auto");
+               enable = false;
+       } else {
+               enable = true;
+               count = 0;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(names); i++) {
+               if ((local & BIT(i)) != enable)
+                       continue;
+
+               count += scnprintf(buffer + count, PAGE_SIZE - count,
+                                  "%s%s,", enable ? "" : "!", names[i]);
+       }
+
+       buffer[count - 1] = '\n';
+       return count;
+}
+
+static const struct kernel_param_ops ops = {
+       .set = mitigations_set,
+       .get = mitigations_get,
+};
+
+module_param_cb_unsafe(mitigations, &ops, NULL, 0600);
+MODULE_PARM_DESC(mitigations,
+"Selectively enable security mitigations for all Intel® GPUs in the system.\n"
+"\n"
+"  auto -- enables all mitigations required for the platform [default]\n"
+"  off  -- disables all mitigations\n"
+"\n"
+"Individual mitigations can be enabled by passing a comma-separated string,\n"
+"e.g. mitigations=residuals to enable only clearing residuals or\n"
+"mitigations=auto,noresiduals to disable only the clear residual mitigation.\n"
+"Either '!' or 'no' may be used to switch from enabling the mitigation to\n"
+"disabling it.\n"
+"\n"
+"Active mitigations for Ivybridge, Baytrail, Haswell:\n"
+"  residuals -- clear all thread-local registers between contexts"
+);
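
The grammar mitigations_set() accepts, restated: the first token may be "auto" (keep everything enabled, including the high marker bit that makes the getter print "auto") or "off" (start from zero); every later token names a mitigation, negated by a '!' or "no" prefix. A self-contained userspace model of the parser, with the whitespace-trimming strim() step omitted and only glibc's strsep()/strdup() assumed:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char * const names[] = { "residuals" };

static int parse(const char *val, unsigned long *out)
{
	unsigned long mask = ~0UL;
	char *str = strdup(val), *sep = str, *tok;
	bool first = true;

	if (!str)
		return -1;

	while ((tok = strsep(&sep, ","))) {
		bool enable = true;
		size_t i;

		if (first) {
			first = false;
			if (!strcmp(tok, "auto"))
				continue;
			mask = 0;
			if (!strcmp(tok, "off"))
				continue;
		}
		if (*tok == '!') {
			enable = !enable;
			tok++;
		}
		if (!strncmp(tok, "no", 2)) {
			enable = !enable;
			tok += 2;
		}
		if (!*tok)
			continue;
		for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
			if (!strcmp(tok, names[i])) {
				if (enable)
					mask |= 1UL << i;
				else
					mask &= ~(1UL << i);
				break;
			}
		}
		if (i == sizeof(names) / sizeof(names[0])) {
			free(str);
			return -1;   /* unknown token, like the pr_err() path */
		}
	}
	free(str);
	*out = mask;
	return 0;
}

int main(void)
{
	unsigned long m;

	if (!parse("auto,noresiduals", &m))
		printf("mask=%#lx\n", m);   /* all bits set except bit 0 */
	return 0;
}

On a real system the same strings are given as i915.mitigations=... on the kernel command line or written to /sys/module/i915/parameters/mitigations.
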
diff --git a/drivers/gpu/drm/i915/i915_mitigations.h b/drivers/gpu/drm/i915/i915_mitigations.h
new file mode 100644
index 0000000..1359d81
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mitigations.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __I915_MITIGATIONS_H__
+#define __I915_MITIGATIONS_H__
+
+#include <linux/types.h>
+
+bool i915_mitigate_clear_residuals(void);
+
+#endif /* __I915_MITIGATIONS_H__ */
index 7e82c41a85f1a62b8d44e736e7f360b27b40d344..bdc989183c648bb690a5832d20b4a5b760faa7f7 100644
@@ -534,8 +534,10 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
 
        if (!gpu->aspace) {
                dev_err(dev->dev, "No memory protection without MMU\n");
-               ret = -ENXIO;
-               goto fail;
+               if (!allow_vram_carveout) {
+                       ret = -ENXIO;
+                       goto fail;
+               }
        }
 
        return gpu;
index 93da6683a8661b6bfc6d88b96d5f095bae423d8f..4534633fe7cdb267718cbf58efef5ab5554203a4 100644
@@ -564,8 +564,10 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
                 * implement a cmdstream validator.
                 */
                DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
-               ret = -ENXIO;
-               goto fail;
+               if (!allow_vram_carveout) {
+                       ret = -ENXIO;
+                       goto fail;
+               }
        }
 
        icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
index c0be3a0f36b2ced405d5b16d0467928776493b1f..82bebb40234de14016e4e25768690706d8120283 100644
@@ -692,8 +692,10 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
                 * implement a cmdstream validator.
                 */
                DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
-               ret = -ENXIO;
-               goto fail;
+               if (!allow_vram_carveout) {
+                       ret = -ENXIO;
+                       goto fail;
+               }
        }
 
        icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
index 87c8b033ad1a6644a66f9d751358a1b3f1c6dfa5..12e75ba360f95548f8af4864aa0c599c9dd57ce8 100644
@@ -18,6 +18,10 @@ bool snapshot_debugbus = false;
 MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
 module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
 
+bool allow_vram_carveout = false;
+MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
+module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
+
 static const struct adreno_info gpulist[] = {
        {
                .rev   = ADRENO_REV(2, 0, 0, 0),
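
The new allow_vram_carveout parameter turns the previously fatal "no IOMMU" probe error in the a2xx/a3xx/a4xx hunks above into an opt-in fallback: with msm.allow_vram_carveout=1 the driver carries on using the (unprotected) VRAM carveout instead of failing. The gate, modelled in plain C with illustrative names:

#include <stdbool.h>
#include <stdio.h>

static bool allow_vram_carveout;   /* models the new msm module param */

static int gpu_init(bool has_iommu)
{
	if (!has_iommu) {
		fprintf(stderr, "No memory protection without IOMMU\n");
		if (!allow_vram_carveout)
			return -1;   /* previous behaviour: always fatal */
		/* opted in: continue with the unprotected carveout */
	}
	return 0;
}

int main(void)
{
	allow_vram_carveout = true;        /* msm.allow_vram_carveout=1 */
	return gpu_init(false) ? 1 : 0;    /* probe now succeeds */
}
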
index 6cf9975e951ed782dbd8e6a2cbe8a93c350761e1..f09175698827a70fa97d362774aebb521d5121c8 100644
@@ -191,8 +191,6 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
                struct platform_device *pdev)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
-       struct io_pgtable_domain_attr pgtbl_cfg;
        struct iommu_domain *iommu;
        struct msm_mmu *mmu;
        struct msm_gem_address_space *aspace;
@@ -202,13 +200,18 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
        if (!iommu)
                return NULL;
 
-       /*
-        * This allows GPU to set the bus attributes required to use system
-        * cache on behalf of the iommu page table walker.
-        */
-       if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
-               pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
-               iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+
+       if (adreno_is_a6xx(adreno_gpu)) {
+               struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+               struct io_pgtable_domain_attr pgtbl_cfg;
+               /*
+               * This allows GPU to set the bus attributes required to use system
+               * cache on behalf of the iommu page table walker.
+               */
+               if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
+                       pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
+                       iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+               }
        }
 
        mmu = msm_iommu_new(&pdev->dev, iommu);
index c3775f79525a78735e34f877145e0d8aea42d3c2..b3d9a333591b2d1ef6ad100c26d2fcc8d947701a 100644
@@ -18,6 +18,7 @@
 #include "adreno_pm4.xml.h"
 
 extern bool snapshot_debugbus;
+extern bool allow_vram_carveout;
 
 enum {
        ADRENO_FW_PM4 = 0,
@@ -211,6 +212,11 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu)
        return gpu->revn == 540;
 }
 
+static inline bool adreno_is_a6xx(struct adreno_gpu *gpu)
+{
+       return ((gpu->revn < 700 && gpu->revn > 599));
+}
+
 static inline int adreno_is_a618(struct adreno_gpu *gpu)
 {
        return gpu->revn == 618;
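
adreno_is_a6xx() classifies purely by revision number: anything in the 600..699 range (a618, a630, a640, a650, ...) counts, which is what lets the LLCC hunk earlier guard its a6xx-only cast. The same predicate as a standalone check:

#include <assert.h>
#include <stdbool.h>

/* Same range test as adreno_is_a6xx(), on a bare revision number. */
static bool is_a6xx(int revn)
{
	return revn > 599 && revn < 700;
}

int main(void)
{
	assert(is_a6xx(630));    /* a630: yes */
	assert(!is_a6xx(540));   /* a540: no */
	return 0;
}
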
index 6e971d552911f67f8a091286156fec05eb5cb623..3bc7ed21de2863823b1800b89bddf8d60cf54e7d 100644
@@ -693,6 +693,13 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
                return 0;
        }
 
+       if (state == ST_CONNECT_PENDING) {
+               /* wait until ST_CONNECTED */
+               dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
+               mutex_unlock(&dp->event_mutex);
+               return 0;
+       }
+
        ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
        if (ret == -ECONNRESET) { /* cable unplugged */
                dp->core_initialized = false;
index 97dca3e378b7bf841d7d8103b9172c19ce5ec6a9..d1780bcac8cc8a49a59e750b2025c450d906077b 100644
@@ -167,12 +167,18 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
        panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
 
        rc = dp_panel_read_dpcd(dp_panel);
+       if (rc) {
+               DRM_ERROR("read dpcd failed %d\n", rc);
+               return rc;
+       }
+
        bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
-       if (rc || !is_link_rate_valid(bw_code) ||
+       if (!is_link_rate_valid(bw_code) ||
                        !is_lane_count_valid(dp_panel->link_info.num_lanes) ||
                        (bw_code > dp_panel->max_bw_code)) {
-               DRM_ERROR("read dpcd failed %d\n", rc);
-               return rc;
+               DRM_ERROR("Illegal link rate=%d lane=%d\n", dp_panel->link_info.rate,
+                               dp_panel->link_info.num_lanes);
+               return -EINVAL;
        }
 
        if (dp_panel->dfp_present) {
index 535a0263ceeb461d111a92e53851a25841e169de..108c405e03dd92e0535cd88c05b019c4be77fc39 100644
@@ -457,14 +457,14 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 
        drm_mode_config_init(ddev);
 
-       /* Bind all our sub-components: */
-       ret = component_bind_all(dev, ddev);
+       ret = msm_init_vram(ddev);
        if (ret)
                goto err_destroy_mdss;
 
-       ret = msm_init_vram(ddev);
+       /* Bind all our sub-components: */
+       ret = component_bind_all(dev, ddev);
        if (ret)
-               goto err_msm_uninit;
+               goto err_destroy_mdss;
 
        dma_set_max_seg_size(dev, UINT_MAX);
 
index 9a7c49bc394f81aa34014900bc8a497eb386aa1d..9d10739c4eb2dab23cb27257e2f8a0addc31ec68 100644
@@ -96,6 +96,8 @@ static struct page **get_pages(struct drm_gem_object *obj)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
+       WARN_ON(!msm_gem_is_locked(obj));
+
        if (!msm_obj->pages) {
                struct drm_device *dev = obj->dev;
                struct page **p;
@@ -988,6 +990,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
                if (msm_obj->pages)
                        kvfree(msm_obj->pages);
 
+               put_iova_vmas(obj);
+
                /* dma_buf_detach() grabs resv lock, so we need to unlock
                 * prior to drm_prime_gem_destroy
                 */
@@ -997,11 +1001,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
        } else {
                msm_gem_vunmap(obj);
                put_pages(obj);
+               put_iova_vmas(obj);
                msm_gem_unlock(obj);
        }
 
-       put_iova_vmas(obj);
-
        drm_gem_object_release(obj);
 
        kfree(msm_obj);
@@ -1115,6 +1118,8 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
                struct msm_gem_vma *vma;
                struct page **pages;
 
+               drm_gem_private_object_init(dev, obj, size);
+
                msm_gem_lock(obj);
 
                vma = add_vma(obj, NULL);
@@ -1126,9 +1131,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 
                to_msm_bo(obj)->vram_node = &vma->node;
 
-               drm_gem_private_object_init(dev, obj, size);
-
+               msm_gem_lock(obj);
                pages = get_pages(obj);
+               msm_gem_unlock(obj);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto fail;
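
Three related msm_gem fixes above: get_pages() now warns when called without the object lock held, the VRAM-carveout path initialises the GEM object first and then takes that lock around its get_pages() call, and the iova VMAs are released before the object itself is freed. A toy model of the new lock assertion, with a pthread mutex standing in for the GEM object lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
static int locked;   /* models msm_gem_is_locked() */

static void get_pages(void)
{
	if (!locked)   /* models the new WARN_ON() */
		fprintf(stderr, "WARN: get_pages() without obj lock\n");
	/* ... allocate or return the cached page array ... */
}

int main(void)
{
	pthread_mutex_lock(&obj_lock);     /* msm_gem_lock() */
	locked = 1;
	get_pages();
	locked = 0;
	pthread_mutex_unlock(&obj_lock);   /* msm_gem_unlock() */
	return 0;
}
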
index 6fdddb266fb1b22333780a3604c5f46825eeb81b..4488e1c061b3d94f3876ca5464d4d096c3698414 100644
@@ -37,6 +37,7 @@ nouveau-y += dispnv50/wimmc37b.o
 nouveau-y += dispnv50/wndw.o
 nouveau-y += dispnv50/wndwc37e.o
 nouveau-y += dispnv50/wndwc57e.o
+nouveau-y += dispnv50/wndwc67e.o
 
 nouveau-y += dispnv50/base.o
 nouveau-y += dispnv50/base507c.o
index 27ea3f34706d4fbdc218ca04cdf339c28d99850b..abefc2343443bef6895f95a35ac08cde64d273ca 100644
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
                int version;
                int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
        } cores[] = {
+               { GA102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
                { TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
                { GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
                { GP102_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
index 121c24a18f111ccee5d5240d0824fde6777139f6..31d8b2e4791dd0fbcaf75009eb3df76b80c24f9b 100644
@@ -31,6 +31,7 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
                int version;
                int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
        } curses[] = {
+               { GA102_DISP_CURSOR, 0, cursc37a_new },
                { TU102_DISP_CURSOR, 0, cursc37a_new },
                { GV100_DISP_CURSOR, 0, cursc37a_new },
                { GK104_DISP_CURSOR, 0, curs907a_new },
index 33fff388dd83c2f259808e689117d532c582aebb..c6367035970eb5e6a14fcf4e4d3c531c28de91b7 100644
@@ -222,7 +222,7 @@ nv50_dmac_wait(struct nvif_push *push, u32 size)
 
 int
 nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
-                const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
+                const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
                 struct nv50_dmac *dmac)
 {
        struct nouveau_cli *cli = (void *)device->object.client;
@@ -271,7 +271,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
        if (ret)
                return ret;
 
-       if (!syncbuf)
+       if (syncbuf < 0)
                return 0;
 
        ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
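
The signature change here is about the sentinel: a syncbuf offset of 0 is a legitimate placement, so "no sync buffer" can no longer be encoded as 0; the parameter becomes s64 and callers such as wimmc37b_init_() (further below) pass -1 instead. Distilled into a runnable sketch, with create_dmac() as an illustrative stand-in:

#include <stdint.h>
#include <stdio.h>

/* Why nv50_dmac_create()'s syncbuf went from u64 to s64: offset 0 is
 * a valid address, so the "no syncbuf" sentinel must be negative. */
static void create_dmac(int64_t syncbuf)
{
	if (syncbuf < 0) {
		puts("no syncbuf: skip sync/vram ctxdma setup");
		return;
	}
	printf("bind syncbuf at %#llx\n", (unsigned long long)syncbuf);
}

int main(void)
{
	create_dmac(-1);   /* what wimmc37b_init_() passes now */
	create_dmac(0);    /* previously ambiguous; now a real offset */
	return 0;
}
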
index 92bddc0836171b853dec40cb09447da4e15259f9..38dec11e7dda5577fdd11263b8e9b8b8b2a44309 100644
@@ -95,7 +95,7 @@ struct nv50_outp_atom {
 
 int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
                     const s32 *oclass, u8 head, void *data, u32 size,
-                    u64 syncbuf, struct nv50_dmac *dmac);
+                    s64 syncbuf, struct nv50_dmac *dmac);
 void nv50_dmac_destroy(struct nv50_dmac *);
 
 /*
index a1ac153d5e9844fc35c76b292888ae8c17a752c9..566fbddfc8d7f47beffdb28ef5748a324fb555b6 100644
@@ -31,6 +31,7 @@ nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
                int version;
                int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
        } wimms[] = {
+               { GA102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
                { TU102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
                { GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
                {}
index 685b7087132426933adf63d9fdddf55fdc439995..b390029c69ec130872a1dc9c6373eba82dea47d5 100644
@@ -76,7 +76,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
        int ret;
 
        ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
-                              &oclass, 0, &args, sizeof(args), 0,
+                              &oclass, 0, &args, sizeof(args), -1,
                               &wndw->wimm);
        if (ret) {
                NV_ERROR(drm, "wimm%04x allocation failed: %d\n", oclass, ret);
index 0356474ad6f6afcd37e5c9540968d406fb5b41bc..ce451242f79eb2fc16403a5cf1d039b70ede0ced 100644
@@ -784,6 +784,7 @@ nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
                int (*new)(struct nouveau_drm *, enum drm_plane_type,
                           int, s32, struct nv50_wndw **);
        } wndws[] = {
+               { GA102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc67e_new },
                { TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
                { GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
                {}
index 3278e28800343c37fe7a88a29df1d7bcee6cab41..f4e0c508003441eea6b190d77f5caf6b3afa258e 100644
@@ -129,6 +129,14 @@ int wndwc37e_update(struct nv50_wndw *, u32 *);
 
 int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
                 struct nv50_wndw **);
+bool wndwc57e_ilut(struct nv50_wndw *, struct nv50_wndw_atom *, int);
+int wndwc57e_ilut_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc57e_ilut_clr(struct nv50_wndw *);
+int wndwc57e_csc_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc57e_csc_clr(struct nv50_wndw *);
+
+int wndwc67e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
+                struct nv50_wndw **);
 
 int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index,
                  struct nv50_wndw **);
index 429be0bb022206678f4fd7d76e19cfeca18e916d..abdd3bb658b383e7fe4f28d315655dad31b93a13 100644
@@ -80,7 +80,7 @@ wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
        return 0;
 }
 
-static int
+int
 wndwc57e_csc_clr(struct nv50_wndw *wndw)
 {
        struct nvif_push *push = wndw->wndw.push;
@@ -98,7 +98,7 @@ wndwc57e_csc_clr(struct nv50_wndw *wndw)
        return 0;
 }
 
-static int
+int
 wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
        struct nvif_push *push = wndw->wndw.push;
@@ -111,7 +111,7 @@ wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
        return 0;
 }
 
-static int
+int
 wndwc57e_ilut_clr(struct nv50_wndw *wndw)
 {
        struct nvif_push *push = wndw->wndw.push;
@@ -124,7 +124,7 @@ wndwc57e_ilut_clr(struct nv50_wndw *wndw)
        return 0;
 }
 
-static int
+int
 wndwc57e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
        struct nvif_push *push = wndw->wndw.push;
@@ -179,7 +179,7 @@ wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem)
        writew(readw(mem - 4), mem + 4);
 }
 
-static bool
+bool
 wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
 {
        if (size = size ? size : 1024, size != 256 && size != 1024)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
new file mode 100644
index 0000000..7a370fa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wndw.h"
+#include "atom.h"
+
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc57e.h>
+
+static int
+wndwc67e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 17)))
+               return ret;
+
+       PUSH_MTHD(push, NVC57E, SET_PRESENT_CONTROL,
+                 NVVAL(NVC57E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
+                 NVVAL(NVC57E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+                 NVDEF(NVC57E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
+
+       PUSH_MTHD(push, NVC57E, SET_SIZE,
+                 NVVAL(NVC57E, SET_SIZE, WIDTH, asyw->image.w) |
+                 NVVAL(NVC57E, SET_SIZE, HEIGHT, asyw->image.h),
+
+                               SET_STORAGE,
+                 NVVAL(NVC57E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh),
+
+                               SET_PARAMS,
+                 NVVAL(NVC57E, SET_PARAMS, FORMAT, asyw->image.format) |
+                 NVDEF(NVC57E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
+                 NVDEF(NVC57E, SET_PARAMS, SWAP_UV, DISABLE) |
+                 NVDEF(NVC57E, SET_PARAMS, FMT_ROUNDING_MODE, ROUND_TO_NEAREST),
+
+                               SET_PLANAR_STORAGE(0),
+                 NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
+                 NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
+
+       PUSH_MTHD(push, NVC57E, SET_CONTEXT_DMA_ISO(0), asyw->image.handle, 1);
+       PUSH_MTHD(push, NVC57E, SET_OFFSET(0), asyw->image.offset[0] >> 8);
+
+       PUSH_MTHD(push, NVC57E, SET_POINT_IN(0),
+                 NVVAL(NVC57E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
+                 NVVAL(NVC57E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
+
+       PUSH_MTHD(push, NVC57E, SET_SIZE_IN,
+                 NVVAL(NVC57E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
+                 NVVAL(NVC57E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
+
+       PUSH_MTHD(push, NVC57E, SET_SIZE_OUT,
+                 NVVAL(NVC57E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
+                 NVVAL(NVC57E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
+       return 0;
+}
+
+static const struct nv50_wndw_func
+wndwc67e = {
+       .acquire = wndwc37e_acquire,
+       .release = wndwc37e_release,
+       .sema_set = wndwc37e_sema_set,
+       .sema_clr = wndwc37e_sema_clr,
+       .ntfy_set = wndwc37e_ntfy_set,
+       .ntfy_clr = wndwc37e_ntfy_clr,
+       .ntfy_reset = corec37d_ntfy_init,
+       .ntfy_wait_begun = base507c_ntfy_wait_begun,
+       .ilut = wndwc57e_ilut,
+       .ilut_identity = true,
+       .ilut_size = 1024,
+       .xlut_set = wndwc57e_ilut_set,
+       .xlut_clr = wndwc57e_ilut_clr,
+       .csc = base907c_csc,
+       .csc_set = wndwc57e_csc_set,
+       .csc_clr = wndwc57e_csc_clr,
+       .image_set = wndwc67e_image_set,
+       .image_clr = wndwc37e_image_clr,
+       .blend_set = wndwc37e_blend_set,
+       .update = wndwc37e_update,
+};
+
+int
+wndwc67e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+            s32 oclass, struct nv50_wndw **pwndw)
+{
+       return wndwc37e_new_(&wndwc67e, drm, type, index, oclass, BIT(index >> 1), pwndw);
+}
index cd9a2e687bb61ba96cfe347ef3b25cc9e8e78fb5..57d4f457a7d4ad03b4bd998a1ef1777923ebab83 100644
@@ -33,6 +33,7 @@ struct nv_device_info_v0 {
 #define NV_DEVICE_INFO_V0_PASCAL                                           0x0a
 #define NV_DEVICE_INFO_V0_VOLTA                                            0x0b
 #define NV_DEVICE_INFO_V0_TURING                                           0x0c
+#define NV_DEVICE_INFO_V0_AMPERE                                           0x0d
        __u8  family;
        __u8  pad06[2];
        __u64 ram_size;
index 2c79beb41126fa75ff55c2479afdba27db841320..ba2c28ea43d20f46b6aca9a2e4e2798a50de45d0 100644 (file)
@@ -88,6 +88,7 @@
 #define GP102_DISP                                    /* cl5070.h */ 0x00009870
 #define GV100_DISP                                    /* cl5070.h */ 0x0000c370
 #define TU102_DISP                                    /* cl5070.h */ 0x0000c570
+#define GA102_DISP                                    /* cl5070.h */ 0x0000c670
 
 #define GV100_DISP_CAPS                                              0x0000c373
 
 #define GK104_DISP_CURSOR                             /* cl507a.h */ 0x0000917a
 #define GV100_DISP_CURSOR                             /* cl507a.h */ 0x0000c37a
 #define TU102_DISP_CURSOR                             /* cl507a.h */ 0x0000c57a
+#define GA102_DISP_CURSOR                             /* cl507a.h */ 0x0000c67a
 
 #define NV50_DISP_OVERLAY                             /* cl507b.h */ 0x0000507b
 #define G82_DISP_OVERLAY                              /* cl507b.h */ 0x0000827b
 
 #define GV100_DISP_WINDOW_IMM_CHANNEL_DMA             /* clc37b.h */ 0x0000c37b
 #define TU102_DISP_WINDOW_IMM_CHANNEL_DMA             /* clc37b.h */ 0x0000c57b
+#define GA102_DISP_WINDOW_IMM_CHANNEL_DMA             /* clc37b.h */ 0x0000c67b
 
 #define NV50_DISP_BASE_CHANNEL_DMA                    /* cl507c.h */ 0x0000507c
 #define G82_DISP_BASE_CHANNEL_DMA                     /* cl507c.h */ 0x0000827c
 #define GP102_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000987d
 #define GV100_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000c37d
 #define TU102_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000c57d
+#define GA102_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000c67d
 
 #define NV50_DISP_OVERLAY_CHANNEL_DMA                 /* cl507e.h */ 0x0000507e
 #define G82_DISP_OVERLAY_CHANNEL_DMA                  /* cl507e.h */ 0x0000827e
 
 #define GV100_DISP_WINDOW_CHANNEL_DMA                 /* clc37e.h */ 0x0000c37e
 #define TU102_DISP_WINDOW_CHANNEL_DMA                 /* clc37e.h */ 0x0000c57e
+#define GA102_DISP_WINDOW_CHANNEL_DMA                 /* clc37e.h */ 0x0000c67e
 
 #define NV50_TESLA                                                   0x00005097
 #define G82_TESLA                                                    0x00008297
index 5c007ce62fc34e1a42f5d2be2d0da98106a76416..c920939a1467946c1c2281a1798d92dda5dc8623 100644
@@ -120,6 +120,7 @@ struct nvkm_device {
                GP100    = 0x130,
                GV100    = 0x140,
                TU100    = 0x160,
+               GA100    = 0x170,
        } card_type;
        u32 chipset;
        u8  chiprev;
index 5a96c942d912f0c68edc9227fd7eab1ef08ab503..0f6fa6631a197aaa7f26479e5bf24192678a0091 100644
@@ -37,4 +37,5 @@ int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gv100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int tu102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int ga102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 #endif
index 1a39e52e09e36e7a92f5c104509c20f9cf17a7b0..50cc7c05eac49c64587bc3c90b24994e82505566 100644
@@ -32,4 +32,5 @@ int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 int gv100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 int tu102_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int ga100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 #endif
index 34b56b10218a8f782ef0a581eabbbed9b3bf14ed..2ecd52aec1d121d050844e1e2e8b425c64c54aee 100644
@@ -86,6 +86,8 @@ int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gp10b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gv100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int ga100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int ga102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 
 #include <subdev/bios.h>
 #include <subdev/bios/ramcfg.h>
index eaacf8d80527cfc080ab34fccd496f16090bc27e..cdcce5ece6ff5973426f791cb4f02fc27537a904 100644
@@ -37,4 +37,5 @@ int nv50_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
 int g94_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
 int gf119_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
 int gk104_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int ga102_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
 #endif
index 81b977319640ae9d7331ea07c78c1e89a313edd6..640f649ce497e4a9c93f8fc5ad1940f4ba205125 100644
@@ -92,6 +92,7 @@ int g94_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gk110_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 int gm200_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 
 static inline int
index 6641fe4c252c62bf8d2c4c42d97e2e64a984489c..e45ca458396709be315edb7cf88c36604db96dc4 100644
@@ -32,4 +32,5 @@ int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int tu102_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int ga100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 #endif
index c7a94c94dbf378289b6cbcbd76b51f8b47d18497..72f35a2babcb20eef0450df7f25f4ee71dc2d9f0 100644
@@ -256,6 +256,7 @@ nouveau_backlight_init(struct drm_connector *connector)
        case NV_DEVICE_INFO_V0_PASCAL:
        case NV_DEVICE_INFO_V0_VOLTA:
        case NV_DEVICE_INFO_V0_TURING:
+       case NV_DEVICE_INFO_V0_AMPERE: //XXX: not confirmed
                ret = nv50_backlight_init(nv_encoder, &props, &ops);
                break;
        default:
index 8d0d30e08f57eff1842fc7fe1d98526ef14a392a..529cb60d5efb020faf0e4a81b45b5cd133fa5a6f 100644
@@ -35,6 +35,7 @@ nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass,
               struct nvif_disp *disp)
 {
        static const struct nvif_mclass disps[] = {
+               { GA102_DISP, -1 },
                { TU102_DISP, -1 },
                { GV100_DISP, -1 },
                { GP102_DISP, -1 },
index 7851bec5f0e5feccd8b2be5f1fbcb2b6180a21bf..cdcc851e06f9b5d8e82341048f8e77e53f07363c 100644
@@ -1815,7 +1815,7 @@ nvf0_chipset = {
        .fb = gk110_fb_new,
        .fuse = gf100_fuse_new,
        .gpio = gk104_gpio_new,
-       .i2c = gk104_i2c_new,
+       .i2c = gk110_i2c_new,
        .ibus = gk104_ibus_new,
        .iccsense = gf100_iccsense_new,
        .imem = nv50_instmem_new,
@@ -1853,7 +1853,7 @@ nvf1_chipset = {
        .fb = gk110_fb_new,
        .fuse = gf100_fuse_new,
        .gpio = gk104_gpio_new,
-       .i2c = gk104_i2c_new,
+       .i2c = gk110_i2c_new,
        .ibus = gk104_ibus_new,
        .iccsense = gf100_iccsense_new,
        .imem = nv50_instmem_new,
@@ -1891,7 +1891,7 @@ nv106_chipset = {
        .fb = gk110_fb_new,
        .fuse = gf100_fuse_new,
        .gpio = gk104_gpio_new,
-       .i2c = gk104_i2c_new,
+       .i2c = gk110_i2c_new,
        .ibus = gk104_ibus_new,
        .iccsense = gf100_iccsense_new,
        .imem = nv50_instmem_new,
@@ -1929,7 +1929,7 @@ nv108_chipset = {
        .fb = gk110_fb_new,
        .fuse = gf100_fuse_new,
        .gpio = gk104_gpio_new,
-       .i2c = gk104_i2c_new,
+       .i2c = gk110_i2c_new,
        .ibus = gk104_ibus_new,
        .iccsense = gf100_iccsense_new,
        .imem = nv50_instmem_new,
@@ -1967,7 +1967,7 @@ nv117_chipset = {
        .fb = gm107_fb_new,
        .fuse = gm107_fuse_new,
        .gpio = gk104_gpio_new,
-       .i2c = gk104_i2c_new,
+       .i2c = gk110_i2c_new,
        .ibus = gk104_ibus_new,
        .iccsense = gf100_iccsense_new,
        .imem = nv50_instmem_new,
@@ -2003,7 +2003,7 @@ nv118_chipset = {
        .fb = gm107_fb_new,
        .fuse = gm107_fuse_new,
        .gpio = gk104_gpio_new,
-       .i2c = gk104_i2c_new,
+       .i2c = gk110_i2c_new,
        .ibus = gk104_ibus_new,
        .iccsense = gf100_iccsense_new,
        .imem = nv50_instmem_new,
@@ -2652,6 +2652,61 @@ nv168_chipset = {
        .sec2 = tu102_sec2_new,
 };
 
+static const struct nvkm_device_chip
+nv170_chipset = {
+       .name = "GA100",
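+       /* note: no .disp/.dma constructors here; GA100 is a compute part with no display engine */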
+       .bar = tu102_bar_new,
+       .bios = nvkm_bios_new,
+       .devinit = ga100_devinit_new,
+       .fb = ga100_fb_new,
+       .gpio = gk104_gpio_new,
+       .i2c = gm200_i2c_new,
+       .ibus = gm200_ibus_new,
+       .imem = nv50_instmem_new,
+       .mc = ga100_mc_new,
+       .mmu = tu102_mmu_new,
+       .pci = gp100_pci_new,
+       .timer = gk20a_timer_new,
+};
+
+static const struct nvkm_device_chip
+nv172_chipset = {
+       .name = "GA102",
+       .bar = tu102_bar_new,
+       .bios = nvkm_bios_new,
+       .devinit = ga100_devinit_new,
+       .fb = ga102_fb_new,
+       .gpio = ga102_gpio_new,
+       .i2c = gm200_i2c_new,
+       .ibus = gm200_ibus_new,
+       .imem = nv50_instmem_new,
+       .mc = ga100_mc_new,
+       .mmu = tu102_mmu_new,
+       .pci = gp100_pci_new,
+       .timer = gk20a_timer_new,
+       .disp = ga102_disp_new,
+       .dma = gv100_dma_new,
+};
+
+static const struct nvkm_device_chip
+nv174_chipset = {
+       .name = "GA104",
+       .bar = tu102_bar_new,
+       .bios = nvkm_bios_new,
+       .devinit = ga100_devinit_new,
+       .fb = ga102_fb_new,
+       .gpio = ga102_gpio_new,
+       .i2c = gm200_i2c_new,
+       .ibus = gm200_ibus_new,
+       .imem = nv50_instmem_new,
+       .mc = ga100_mc_new,
+       .mmu = tu102_mmu_new,
+       .pci = gp100_pci_new,
+       .timer = gk20a_timer_new,
+       .disp = ga102_disp_new,
+       .dma = gv100_dma_new,
+};
+
 static int
 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
                       struct nvkm_notify *notify)
@@ -3063,6 +3118,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                        case 0x130: device->card_type = GP100; break;
                        case 0x140: device->card_type = GV100; break;
                        case 0x160: device->card_type = TU100; break;
+                       case 0x170: device->card_type = GA100; break;
                        default:
                                break;
                        }
@@ -3160,10 +3216,23 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                case 0x166: device->chip = &nv166_chipset; break;
                case 0x167: device->chip = &nv167_chipset; break;
                case 0x168: device->chip = &nv168_chipset; break;
+               case 0x172: device->chip = &nv172_chipset; break;
+               case 0x174: device->chip = &nv174_chipset; break;
                default:
-                       nvdev_error(device, "unknown chipset (%08x)\n", boot0);
-                       ret = -ENODEV;
-                       goto done;
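+                       /* GA100 bring-up is incomplete, so expose it only on explicit opt-in */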
+                       if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
+                               switch (device->chipset) {
+                               case 0x170: device->chip = &nv170_chipset; break;
+                               default:
+                                       break;
+                               }
+                       }
+
+                       if (!device->chip) {
+                               nvdev_error(device, "unknown chipset (%08x)\n", boot0);
+                               ret = -ENODEV;
+                               goto done;
+                       }
+                       break;
                }
 
                nvdev_info(device, "NVIDIA %s (%08x)\n",
index 03c6d9aef075cca0b13770d25d941602bb7ef49f..1478947987860d664a89247afaecd8e7d46c9dc7 100644 (file)
@@ -176,6 +176,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
        case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
        case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
        case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
+       case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
        default:
                args->v0.family = 0;
                break;
index cf075311cdd27ae189b5c00cedc589f14b93f44e..b03f043efe261542c267a0a7a3ead68858c36f82 100644 (file)
@@ -17,6 +17,7 @@ nvkm-y += nvkm/engine/disp/gp100.o
 nvkm-y += nvkm/engine/disp/gp102.o
 nvkm-y += nvkm/engine/disp/gv100.o
 nvkm-y += nvkm/engine/disp/tu102.o
+nvkm-y += nvkm/engine/disp/ga102.o
 nvkm-y += nvkm/engine/disp/vga.o
 
 nvkm-y += nvkm/engine/disp/head.o
@@ -42,6 +43,7 @@ nvkm-y += nvkm/engine/disp/sorgm200.o
 nvkm-y += nvkm/engine/disp/sorgp100.o
 nvkm-y += nvkm/engine/disp/sorgv100.o
 nvkm-y += nvkm/engine/disp/sortu102.o
+nvkm-y += nvkm/engine/disp/sorga102.o
 
 nvkm-y += nvkm/engine/disp/outp.o
 nvkm-y += nvkm/engine/disp/dp.o
@@ -75,6 +77,7 @@ nvkm-y += nvkm/engine/disp/rootgp100.o
 nvkm-y += nvkm/engine/disp/rootgp102.o
 nvkm-y += nvkm/engine/disp/rootgv100.o
 nvkm-y += nvkm/engine/disp/roottu102.o
+nvkm-y += nvkm/engine/disp/rootga102.o
 
 nvkm-y += nvkm/engine/disp/capsgv100.o
 
index 3800aeb507d0167d4e7bb41210c6fb5fd925cdc2..55fbfe28c6dc1a50da06bc117a95cc91b303b4ac 100644 (file)
 
 #include <nvif/event.h>
 
+/* IED scripts are no longer used by UEFI/RM from Ampere onwards, but they have
+ * been updated for the x86 option ROM.  However, the relevant VBIOS table
+ * versions weren't bumped, so there's no clean way to detect this.
+ */
+#define AMPERE_IED_HACK(disp) ((disp)->engine.subdev.device->card_type >= GA100)
+
 struct lt_state {
        struct nvkm_dp *dp;
        u8  stat[6];
@@ -238,6 +244,19 @@ nvkm_dp_train_links(struct nvkm_dp *dp)
                dp->dpcd[DPCD_RC02] &= ~DPCD_RC02_TPS3_SUPPORTED;
        lt.pc2 = dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED;
 
+       if (AMPERE_IED_HACK(disp) && (lnkcmp = lt.dp->info.script[0])) {
+               /* Execute BeforeLinkTraining script from DP Info table. */
+               while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
+                       lnkcmp += 3;
+               lnkcmp = nvbios_rd16(bios, lnkcmp + 1);
+
+               nvbios_init(&dp->outp.disp->engine.subdev, lnkcmp,
+                       init.outp = &dp->outp.info;
+                       init.or   = ior->id;
+                       init.link = ior->asy.link;
+               );
+       }
+
        /* Set desired link configuration on the source. */
        if ((lnkcmp = lt.dp->info.lnkcmp)) {
                if (dp->version < 0x30) {
@@ -316,12 +335,14 @@ nvkm_dp_train_init(struct nvkm_dp *dp)
                );
        }
 
-       /* Execute BeforeLinkTraining script from DP Info table. */
-       nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[0],
-               init.outp = &dp->outp.info;
-               init.or   = dp->outp.ior->id;
-               init.link = dp->outp.ior->asy.link;
-       );
+       if (!AMPERE_IED_HACK(dp->outp.disp)) {
+               /* Execute BeforeLinkTraining script from DP Info table. */
+               nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[0],
+                       init.outp = &dp->outp.info;
+                       init.or   = dp->outp.ior->id;
+                       init.link = dp->outp.ior->asy.link;
+               );
+       }
 }
 
 static const struct dp_rates {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
new file mode 100644 (file)
index 0000000..aa2e564
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+#include "head.h"
+#include "ior.h"
+#include "channv50.h"
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+ga102_disp = {
+       .init = tu102_disp_init,
+       .fini = gv100_disp_fini,
+       .intr = gv100_disp_intr,
+       .uevent = &gv100_disp_chan_uevent,
+       .super = gv100_disp_super,
+       .root = &ga102_disp_root_oclass,
+       .wndw = { .cnt = gv100_disp_wndw_cnt },
+       .head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
+       .sor = { .cnt = gv100_sor_cnt, .new = ga102_sor_new },
+       .ramht_size = 0x2000,
+};
+
+int
+ga102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+       return nv50_disp_new_(&ga102_disp, device, index, pdisp);
+}
index 09f3038eff26fba9b411681dd66db46cf5f5273f..9f0bb7c6b0100f07d7d4b1dd36a94b5db91b072e 100644 (file)
@@ -150,6 +150,8 @@ void gv100_sor_dp_audio(struct nvkm_ior *, int, bool);
 void gv100_sor_dp_audio_sym(struct nvkm_ior *, int, u16, u32);
 void gv100_sor_dp_watermark(struct nvkm_ior *, int, u8);
 
+void tu102_sor_dp_vcpi(struct nvkm_ior *, int, u8, u8, u16, u16);
+
 void g84_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
 void gt215_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
 void gf119_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
@@ -207,4 +209,6 @@ int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
 int gv100_sor_new(struct nvkm_disp *, int);
 
 int tu102_sor_new(struct nvkm_disp *, int);
+
+int ga102_sor_new(struct nvkm_disp *, int);
 #endif
index a677161c7f3a6c77096a0dedc155e63b05a87cb1..db31b37752a270b163b9b1ea8366546bef2470fa 100644 (file)
@@ -86,6 +86,8 @@ void gv100_disp_intr(struct nv50_disp *);
 void gv100_disp_super(struct work_struct *);
 int gv100_disp_wndw_cnt(struct nvkm_disp *, unsigned long *);
 
+int tu102_disp_init(struct nv50_disp *);
+
 void nv50_disp_dptmds_war_2(struct nv50_disp *, struct dcb_output *);
 void nv50_disp_dptmds_war_3(struct nv50_disp *, struct dcb_output *);
 void nv50_disp_update_sppll1(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c
new file mode 100644 (file)
index 0000000..9af07c3
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "rootnv50.h"
+#include "channv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+ga102_disp_root = {
+       .user = {
+               {{-1,-1,GV100_DISP_CAPS                }, gv100_disp_caps_new },
+               {{0,0,GA102_DISP_CURSOR                }, gv100_disp_curs_new },
+               {{0,0,GA102_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
+               {{0,0,GA102_DISP_CORE_CHANNEL_DMA      }, gv100_disp_core_new },
+               {{0,0,GA102_DISP_WINDOW_CHANNEL_DMA    }, gv100_disp_wndw_new },
+               {}
+       },
+};
+
+static int
+ga102_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+                   void *data, u32 size, struct nvkm_object **pobject)
+{
+       return nv50_disp_root_new_(&ga102_disp_root, disp, oclass, data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+ga102_disp_root_oclass = {
+       .base.oclass = GA102_DISP,
+       .base.minver = -1,
+       .base.maxver = -1,
+       .ctor = ga102_disp_root_new,
+};
index 7070f5408d92bf617746b5e7baa76b45eff8f825..27bb170d02930441d99b1f09b49335b74f44e482 100644 (file)
@@ -41,4 +41,5 @@ extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
 extern const struct nvkm_disp_oclass gp102_disp_root_oclass;
 extern const struct nvkm_disp_oclass gv100_disp_root_oclass;
 extern const struct nvkm_disp_oclass tu102_disp_root_oclass;
+extern const struct nvkm_disp_oclass ga102_disp_root_oclass;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c
new file mode 100644 (file)
index 0000000..033827d
--- /dev/null
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ior.h"
+
+#include <subdev/timer.h>
+
+static int
+ga102_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
+{
+       struct nvkm_device *device = sor->disp->engine.subdev.device;
+       const u32 soff = nv50_ior_base(sor);
+       const u32 loff = nv50_sor_link(sor);
+       u32 dpctrl = 0x00000000;
+       u32 clksor = 0x00000000;
+
+       switch (sor->dp.bw) {
+       case 0x06: clksor |= 0x00000000; break;
+       case 0x0a: clksor |= 0x00040000; break;
+       case 0x14: clksor |= 0x00080000; break;
+       case 0x1e: clksor |= 0x000c0000; break;
+       default:
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
+       dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
+       if (sor->dp.mst)
+               dpctrl |= 0x40000000;
+       if (sor->dp.ef)
+               dpctrl |= 0x00004000;
+
+       nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
+
+       /*XXX*/
+       nvkm_msec(device, 40, NVKM_DELAY);
+       nvkm_mask(device, 0x612300 + soff, 0x00030000, 0x00010000);
+       nvkm_mask(device, 0x61c10c + loff, 0x00000003, 0x00000001);
+
+       nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
+       return 0;
+}
+
+static void
+ga102_sor_clock(struct nvkm_ior *sor)
+{
+       struct nvkm_device *device = sor->disp->engine.subdev.device;
+       u32 div2 = 0;
+       if (sor->asy.proto == TMDS) {
+               if (sor->tmds.high_speed)
+                       div2 = 1;
+       }
+       nvkm_wr32(device, 0x00ec08 + (sor->id * 0x10), 0x00000000);
+       nvkm_wr32(device, 0x00ec04 + (sor->id * 0x10), div2);
+}
+
+static const struct nvkm_ior_func
+ga102_sor_hda = {
+       .route = {
+               .get = gm200_sor_route_get,
+               .set = gm200_sor_route_set,
+       },
+       .state = gv100_sor_state,
+       .power = nv50_sor_power,
+       .clock = ga102_sor_clock,
+       .hdmi = {
+               .ctrl = gv100_hdmi_ctrl,
+               .scdc = gm200_hdmi_scdc,
+       },
+       .dp = {
+               .lanes = { 0, 1, 2, 3 },
+               .links = ga102_sor_dp_links,
+               .power = g94_sor_dp_power,
+               .pattern = gm107_sor_dp_pattern,
+               .drive = gm200_sor_dp_drive,
+               .vcpi = tu102_sor_dp_vcpi,
+               .audio = gv100_sor_dp_audio,
+               .audio_sym = gv100_sor_dp_audio_sym,
+               .watermark = gv100_sor_dp_watermark,
+       },
+       .hda = {
+               .hpd = gf119_hda_hpd,
+               .eld = gf119_hda_eld,
+               .device_entry = gv100_hda_device_entry,
+       },
+};
+
+static const struct nvkm_ior_func
+ga102_sor = {
+       .route = {
+               .get = gm200_sor_route_get,
+               .set = gm200_sor_route_set,
+       },
+       .state = gv100_sor_state,
+       .power = nv50_sor_power,
+       .clock = ga102_sor_clock,
+       .hdmi = {
+               .ctrl = gv100_hdmi_ctrl,
+               .scdc = gm200_hdmi_scdc,
+       },
+       .dp = {
+               .lanes = { 0, 1, 2, 3 },
+               .links = ga102_sor_dp_links,
+               .power = g94_sor_dp_power,
+               .pattern = gm107_sor_dp_pattern,
+               .drive = gm200_sor_dp_drive,
+               .vcpi = tu102_sor_dp_vcpi,
+               .audio = gv100_sor_dp_audio,
+               .audio_sym = gv100_sor_dp_audio_sym,
+               .watermark = gv100_sor_dp_watermark,
+       },
+};
+
+int
+ga102_sor_new(struct nvkm_disp *disp, int id)
+{
+       struct nvkm_device *device = disp->engine.subdev.device;
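+       /* one bit per SOR; set bits appear to indicate HDA audio is wired up */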
+       u32 hda = nvkm_rd32(device, 0x08a15c);
+       if (hda & BIT(id))
+               return nvkm_ior_new_(&ga102_sor_hda, disp, SOR, id);
+       return nvkm_ior_new_(&ga102_sor, disp, SOR, id);
+}
index 59865a934c4b9d4e3e0c85ab4f47a0bd86b2073b..0cf9e8752d2588c83b146d3534907a7c7b7c10b6 100644 (file)
@@ -23,7 +23,7 @@
 
 #include <subdev/timer.h>
 
-static void
+void
 tu102_sor_dp_vcpi(struct nvkm_ior *sor, int head,
                  u8 slot, u8 slot_nr, u16 pbn, u16 aligned)
 {
index 883ae4151ff8887074e63fec782a211b07a5fa94..4c85d1d4fbd4265cabd179923519412766473114 100644 (file)
@@ -28,7 +28,7 @@
 #include <core/gpuobj.h>
 #include <subdev/timer.h>
 
-static int
+int
 tu102_disp_init(struct nv50_disp *disp)
 {
        struct nvkm_device *device = disp->base.engine.subdev.device;
index 7deb81b6dbac6bc5a440da281eca94b51b632f83..4b571cc6bc70f4cacb934cd731aed683ca8ec0f0 100644 (file)
@@ -75,7 +75,7 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
        nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
                   image.base, image.type, image.size);
 
-       if (!shadow_fetch(bios, mthd, image.size)) {
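+       /* the image starts at image.base, so fetch through image.base + image.size */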
+       if (!shadow_fetch(bios, mthd, image.base + image.size)) {
                nvkm_debug(subdev, "%08x: fetch failed\n", image.base);
                return 0;
        }
index 3634cd0630b81ce4d697261ee0100f1ce4ec5b39..023ddc7c5399a40ffbb19da8fb14cf53a4f4871a 100644 (file)
@@ -64,6 +64,9 @@ pramin_init(struct nvkm_bios *bios, const char *name)
                return NULL;
 
        /* we can't get the bios image pointer without PDISP */
+       if (device->card_type >= GA100)
+               addr = device->chipset == 0x170; /*XXX: find the fuse reg for this */
+       else
        if (device->card_type >= GM100)
                addr = nvkm_rd32(device, 0x021c04);
        else
index b3429371ed824b7bd4b3043b0118accbcdf48655..d1abb64841dac7ad969e4ab1ff688c03c5060095 100644 (file)
@@ -15,3 +15,4 @@ nvkm-y += nvkm/subdev/devinit/gm107.o
 nvkm-y += nvkm/subdev/devinit/gm200.o
 nvkm-y += nvkm/subdev/devinit/gv100.o
 nvkm-y += nvkm/subdev/devinit/tu102.o
+nvkm-y += nvkm/subdev/devinit/ga100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
new file mode 100644 (file)
index 0000000..636a921
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+#include <subdev/clk/pll.h>
+
+static int
+ga100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
+{
+       struct nvkm_subdev *subdev = &init->subdev;
+       struct nvkm_device *device = subdev->device;
+       struct nvbios_pll info;
+       int head = type - PLL_VPLL0;
+       int N, fN, M, P;
+       int ret;
+
+       ret = nvbios_pll_parse(device->bios, type, &info);
+       if (ret)
+               return ret;
+
+       ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
+       if (ret < 0)
+               return ret;
+
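+       /* VPLL registers are laid out per head, 0x40 bytes apart */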
+       switch (info.type) {
+       case PLL_VPLL0:
+       case PLL_VPLL1:
+       case PLL_VPLL2:
+       case PLL_VPLL3:
+               nvkm_wr32(device, 0x00ef00 + (head * 0x40), 0x02080004);
+               nvkm_wr32(device, 0x00ef18 + (head * 0x40), (N << 16) | fN);
+               nvkm_wr32(device, 0x00ef04 + (head * 0x40), (P << 16) | M);
+               nvkm_wr32(device, 0x00e9c0 + (head * 0x04), 0x00000001);
+               break;
+       default:
+               nvkm_warn(subdev, "%08x/%dkHz unimplemented\n", type, freq);
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static const struct nvkm_devinit_func
+ga100_devinit = {
+       .init = nv50_devinit_init,
+       .post = tu102_devinit_post,
+       .pll_set = ga100_devinit_pll_set,
+};
+
+int
+ga100_devinit_new(struct nvkm_device *device, int index, struct nvkm_devinit **pinit)
+{
+       return nv50_devinit_new_(&ga100_devinit, device, index, pinit);
+}
index 94723352137a7f9730f9a87ead7dfbafb16a9ca9..05961e624264754078e6bc48bc669db3f978c54f 100644 (file)
@@ -19,4 +19,5 @@ void nvkm_devinit_ctor(const struct nvkm_devinit_func *, struct nvkm_device *,
                       int index, struct nvkm_devinit *);
 
 int nv04_devinit_post(struct nvkm_devinit *, bool);
+int tu102_devinit_post(struct nvkm_devinit *, bool);
 #endif
index 397670e72fff936a4e7d1812490198996e318093..9a469bf482f2f5bb13b270c625d9fc37b476df2c 100644 (file)
@@ -65,7 +65,7 @@ tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
        return ret;
 }
 
-static int
+int
 tu102_devinit_post(struct nvkm_devinit *base, bool post)
 {
        struct nv50_devinit *init = nv50_devinit(base);
index 43a42159a3d00e5fe432f997ba92565a612ec2f2..5d0bab8ecb433d60dd28306048f4f7ca656ea483 100644 (file)
@@ -32,6 +32,8 @@ nvkm-y += nvkm/subdev/fb/gp100.o
 nvkm-y += nvkm/subdev/fb/gp102.o
 nvkm-y += nvkm/subdev/fb/gp10b.o
 nvkm-y += nvkm/subdev/fb/gv100.o
+nvkm-y += nvkm/subdev/fb/ga100.o
+nvkm-y += nvkm/subdev/fb/ga102.o
 
 nvkm-y += nvkm/subdev/fb/ram.o
 nvkm-y += nvkm/subdev/fb/ramnv04.o
@@ -52,6 +54,7 @@ nvkm-y += nvkm/subdev/fb/ramgk104.o
 nvkm-y += nvkm/subdev/fb/ramgm107.o
 nvkm-y += nvkm/subdev/fb/ramgm200.o
 nvkm-y += nvkm/subdev/fb/ramgp100.o
+nvkm-y += nvkm/subdev/fb/ramga102.o
 nvkm-y += nvkm/subdev/fb/sddr2.o
 nvkm-y += nvkm/subdev/fb/sddr3.o
 nvkm-y += nvkm/subdev/fb/gddr3.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
new file mode 100644 (file)
index 0000000..bf82686
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ram.h"
+
+static const struct nvkm_fb_func
+ga100_fb = {
+       .dtor = gf100_fb_dtor,
+       .oneinit = gf100_fb_oneinit,
+       .init = gp100_fb_init,
+       .init_page = gv100_fb_init_page,
+       .init_unkn = gp100_fb_init_unkn,
+       .ram_new = gp100_ram_new,
+       .default_bigpage = 16,
+};
+
+int
+ga100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+       return gp102_fb_new_(&ga100_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
new file mode 100644 (file)
index 0000000..bcecf84
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ram.h"
+
+static const struct nvkm_fb_func
+ga102_fb = {
+       .dtor = gf100_fb_dtor,
+       .oneinit = gf100_fb_oneinit,
+       .init = gp100_fb_init,
+       .init_page = gv100_fb_init_page,
+       .init_unkn = gp100_fb_init_unkn,
+       .ram_new = ga102_ram_new,
+       .default_bigpage = 16,
+};
+
+int
+ga102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+       return gp102_fb_new_(&ga102_fb, device, index, pfb);
+}
index 10ff5d053f7ea4e0f0645028906da0d6acc2a031..feda86a5fba8592000ac2657a815b72203cbaf91 100644 (file)
@@ -22,7 +22,7 @@
 #include "gf100.h"
 #include "ram.h"
 
-static int
+int
 gv100_fb_init_page(struct nvkm_fb *fb)
 {
        return (fb->page == 16) ? 0 : -EINVAL;
index 5be9c563350d7b684e4bdb3a4ca5f90805d09d28..66932ac10d15c68b65e80d982a90e54873e3d393 100644 (file)
@@ -82,4 +82,6 @@ int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, int,
                  struct nvkm_fb **);
 bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
 int gp102_fb_vpr_scrub(struct nvkm_fb *);
+
+int gv100_fb_init_page(struct nvkm_fb *);
 #endif
index d723a9b4e3c47006903c362b60f18da649b775b4..ea7d66f3dd825dba1f4eb6162e9a66e163d1677a 100644 (file)
@@ -70,4 +70,5 @@ int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gm200_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int ga102_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c
new file mode 100644 (file)
index 0000000..298c136
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ram.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/rammap.h>
+
+static const struct nvkm_ram_func
+ga102_ram = {
+};
+
+int
+ga102_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+       struct nvkm_device *device = fb->subdev.device;
+       enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
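+       /* 0x1183a4 appears to report the amount of VRAM in MiB, hence the << 20 below */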
+       u32 size = nvkm_rd32(device, 0x1183a4);
+
+       return nvkm_ram_new_(&ga102_ram, fb, type, (u64)size << 20, pram);
+}
index b2ad5922a1c2dedb83d140a2ae2f561a08932850..efbbaa080de5172c1ebd02a49182e77337d0a240 100644 (file)
@@ -5,3 +5,4 @@ nvkm-y += nvkm/subdev/gpio/nv50.o
 nvkm-y += nvkm/subdev/gpio/g94.o
 nvkm-y += nvkm/subdev/gpio/gf119.o
 nvkm-y += nvkm/subdev/gpio/gk104.o
+nvkm-y += nvkm/subdev/gpio/ga102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
new file mode 100644 (file)
index 0000000..62c791b
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+ga102_gpio_reset(struct nvkm_gpio *gpio, u8 match)
+{
+       struct nvkm_device *device = gpio->subdev.device;
+       struct nvkm_bios *bios = device->bios;
+       u8 ver, len;
+       u16 entry;
+       int ent = -1;
+
+       while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
+               u32 data = nvbios_rd32(bios, entry);
+               u8  line =   (data & 0x0000003f);
+               u8  defs = !!(data & 0x00000080);
+               u8  func =   (data & 0x0000ff00) >> 8;
+               u8  unk0 =   (data & 0x00ff0000) >> 16;
+               u8  unk1 =   (data & 0x1f000000) >> 24;
+
+               if ( func  == DCB_GPIO_UNUSED ||
+                   (match != DCB_GPIO_UNUSED && match != func))
+                       continue;
+
+               nvkm_gpio_set(gpio, 0, func, line, defs);
+
+               nvkm_mask(device, 0x021200 + (line * 4), 0xff, unk0);
+               if (unk1--)
+                       nvkm_mask(device, 0x00d740 + (unk1 * 4), 0xff, line);
+       }
+}
+
+static int
+ga102_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
+{
+       struct nvkm_device *device = gpio->subdev.device;
+       u32 data = ((dir ^ 1) << 13) | (out << 12);
+       nvkm_mask(device, 0x021200 + (line * 4), 0x00003000, data);
+       nvkm_mask(device, 0x00d604, 0x00000001, 0x00000001); /* update? */
+       return 0;
+}
+
+static int
+ga102_gpio_sense(struct nvkm_gpio *gpio, int line)
+{
+       struct nvkm_device *device = gpio->subdev.device;
+       return !!(nvkm_rd32(device, 0x021200 + (line * 4)) & 0x00004000);
+}
+
+static void
+ga102_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
+{
+       struct nvkm_device *device = gpio->subdev.device;
+       u32 intr0 = nvkm_rd32(device, 0x021640);
+       u32 intr1 = nvkm_rd32(device, 0x02164c);
+       u32 stat0 = nvkm_rd32(device, 0x021648) & intr0;
+       u32 stat1 = nvkm_rd32(device, 0x021654) & intr1;
+       *lo = (stat1 & 0xffff0000) | (stat0 >> 16);
+       *hi = (stat1 << 16) | (stat0 & 0x0000ffff);
+       nvkm_wr32(device, 0x021640, intr0);
+       nvkm_wr32(device, 0x02164c, intr1);
+}
+
+static void
+ga102_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
+{
+       struct nvkm_device *device = gpio->subdev.device;
+       u32 inte0 = nvkm_rd32(device, 0x021648);
+       u32 inte1 = nvkm_rd32(device, 0x021654);
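+       /* each register covers 16 lines: "hi" enables in the low half, "lo" in the high half */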
+       if (type & NVKM_GPIO_LO)
+               inte0 = (inte0 & ~(mask << 16)) | (data << 16);
+       if (type & NVKM_GPIO_HI)
+               inte0 = (inte0 & ~(mask & 0xffff)) | (data & 0xffff);
+       mask >>= 16;
+       data >>= 16;
+       if (type & NVKM_GPIO_LO)
+               inte1 = (inte1 & ~(mask << 16)) | (data << 16);
+       if (type & NVKM_GPIO_HI)
+               inte1 = (inte1 & ~mask) | data;
+       nvkm_wr32(device, 0x021648, inte0);
+       nvkm_wr32(device, 0x021654, inte1);
+}
+
+static const struct nvkm_gpio_func
+ga102_gpio = {
+       .lines = 32,
+       .intr_stat = ga102_gpio_intr_stat,
+       .intr_mask = ga102_gpio_intr_mask,
+       .drive = ga102_gpio_drive,
+       .sense = ga102_gpio_sense,
+       .reset = ga102_gpio_reset,
+};
+
+int
+ga102_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+       return nvkm_gpio_new_(&ga102_gpio, device, index, pgpio);
+}
index 723d0284caefc7d19757861a16e816c5e7913aff..819703913a00c4db2f0b00c048686acb9393e602 100644 (file)
@@ -7,6 +7,7 @@ nvkm-y += nvkm/subdev/i2c/g94.o
 nvkm-y += nvkm/subdev/i2c/gf117.o
 nvkm-y += nvkm/subdev/i2c/gf119.o
 nvkm-y += nvkm/subdev/i2c/gk104.o
+nvkm-y += nvkm/subdev/i2c/gk110.o
 nvkm-y += nvkm/subdev/i2c/gm200.o
 
 nvkm-y += nvkm/subdev/i2c/pad.o
index 30b48896965ebc981c838087357dbfcc408ef054..f920eabf8628dc5db7219d20cb1829fda2cd5ccb 100644 (file)
@@ -3,6 +3,13 @@
 #define __NVKM_I2C_AUX_H__
 #include "pad.h"
 
+static inline void
+nvkm_i2c_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+       if (i2c->func->aux_autodpcd)
+               i2c->func->aux_autodpcd(i2c, aux, enable);
+}
+
 struct nvkm_i2c_aux_func {
        bool address_only;
        int  (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
index db7769cb33ebadfa10078c05f019c35cd4680338..47068f6f9c55d439eb0397d1fa254659776807a0 100644 (file)
@@ -77,7 +77,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                 u8 type, u32 addr, u8 *data, u8 *size)
 {
        struct g94_i2c_aux *aux = g94_i2c_aux(obj);
-       struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+       struct nvkm_i2c *i2c = aux->base.pad->i2c;
+       struct nvkm_device *device = i2c->subdev.device;
        const u32 base = aux->ch * 0x50;
        u32 ctrl, stat, timeout, retries = 0;
        u32 xbuf[4] = {};
@@ -96,6 +97,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                goto out;
        }
 
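+       /* pause HW-initiated DPCD reads while we drive the AUX channel manually */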
+       nvkm_i2c_aux_autodpcd(i2c, aux->ch, false);
+
        if (!(type & 1)) {
                memcpy(xbuf, data, *size);
                for (i = 0; i < 16; i += 4) {
@@ -128,7 +131,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                        if (!timeout--) {
                                AUX_ERR(&aux->base, "timeout %08x", ctrl);
                                ret = -EIO;
-                               goto out;
+                               goto out_err;
                        }
                } while (ctrl & 0x00010000);
                ret = 0;
@@ -154,7 +157,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                memcpy(data, xbuf, *size);
                *size = stat & 0x0000001f;
        }
-
+out_err:
+       nvkm_i2c_aux_autodpcd(i2c, aux->ch, true);
 out:
        g94_i2c_aux_fini(aux);
        return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
index edb6148cbca042c544939adb2cae2588e90a3e0e..8bd1d442e4654a0f4ff0f75c9c485e64d7562a9d 100644 (file)
@@ -33,7 +33,7 @@ static void
 gm200_i2c_aux_fini(struct gm200_i2c_aux *aux)
 {
        struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
-       nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000);
+       nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00710000, 0x00000000);
 }
 
 static int
@@ -54,10 +54,10 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
                        AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
                        return -EBUSY;
                }
-       } while (ctrl & 0x03010000);
+       } while (ctrl & 0x07010000);
 
        /* set some magic, and wait up to 1ms for it to appear */
-       nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq);
+       nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00700000, ureq);
        timeout = 1000;
        do {
                ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
@@ -67,7 +67,7 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
                        gm200_i2c_aux_fini(aux);
                        return -EBUSY;
                }
-       } while ((ctrl & 0x03000000) != urep);
+       } while ((ctrl & 0x07000000) != urep);
 
        return 0;
 }
@@ -77,7 +77,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                   u8 type, u32 addr, u8 *data, u8 *size)
 {
        struct gm200_i2c_aux *aux = gm200_i2c_aux(obj);
-       struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+       struct nvkm_i2c *i2c = aux->base.pad->i2c;
+       struct nvkm_device *device = i2c->subdev.device;
        const u32 base = aux->ch * 0x50;
        u32 ctrl, stat, timeout, retries = 0;
        u32 xbuf[4] = {};
@@ -96,6 +97,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                goto out;
        }
 
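+       /* as in g94: pause HW-initiated DPCD reads for the duration of the transfer */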
+       nvkm_i2c_aux_autodpcd(i2c, aux->ch, false);
+
        if (!(type & 1)) {
                memcpy(xbuf, data, *size);
                for (i = 0; i < 16; i += 4) {
@@ -128,7 +131,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                        if (!timeout--) {
                                AUX_ERR(&aux->base, "timeout %08x", ctrl);
                                ret = -EIO;
-                               goto out;
+                               goto out_err;
                        }
                } while (ctrl & 0x00010000);
                ret = 0;
@@ -155,6 +158,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                *size = stat & 0x0000001f;
        }
 
+out_err:
+       nvkm_i2c_aux_autodpcd(i2c, aux->ch, true);
 out:
        gm200_i2c_aux_fini(aux);
        return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c
new file mode 100644 (file)
index 0000000..8e3bfa1
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "pad.h"
+
+static void
+gk110_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+       nvkm_mask(i2c->subdev.device, 0x00e4f8 + (aux * 0x50), 0x00010000, enable << 16);
+}
+
+static const struct nvkm_i2c_func
+gk110_i2c = {
+       .pad_x_new = gf119_i2c_pad_x_new,
+       .pad_s_new = gf119_i2c_pad_s_new,
+       .aux = 4,
+       .aux_stat = gk104_aux_stat,
+       .aux_mask = gk104_aux_mask,
+       .aux_autodpcd = gk110_aux_autodpcd,
+};
+
+int
+gk110_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+       return nvkm_i2c_new_(&gk110_i2c, device, index, pi2c);
+}
index a23c5f315221cb68d0230bff8095148a1a4604b0..7b2375bff8a9cc8cc9e43ed0c945db02a8fe75fe 100644 (file)
 #include "priv.h"
 #include "pad.h"
 
+static void
+gm200_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+       nvkm_mask(i2c->subdev.device, 0x00d968 + (aux * 0x50), 0x00010000, enable << 16);
+}
+
 static const struct nvkm_i2c_func
 gm200_i2c = {
        .pad_x_new = gf119_i2c_pad_x_new,
@@ -31,6 +37,7 @@ gm200_i2c = {
        .aux = 8,
        .aux_stat = gk104_aux_stat,
        .aux_mask = gk104_aux_mask,
+       .aux_autodpcd = gm200_aux_autodpcd,
 };
 
 int
index 461016814f4f272205386e4286b2e20befc8b920..44b7bb7d4777650ce3730868007750f6668dbd08 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: MIT */
 #ifndef __NVKM_I2C_PAD_H__
 #define __NVKM_I2C_PAD_H__
-#include <subdev/i2c.h>
+#include "priv.h"
 
 struct nvkm_i2c_pad {
        const struct nvkm_i2c_pad_func *func;
index bd86bc298ebe54e87bd6cda6fb122f663c244f21..e35f6036fcfcb85edd2d652cec50e858d72a4dfb 100644 (file)
@@ -23,6 +23,10 @@ struct nvkm_i2c_func {
        /* mask on/off interrupt types for a given set of auxch
         */
        void (*aux_mask)(struct nvkm_i2c *, u32, u32, u32);
+
+       /* enable/disable HW-initiated DPCD reads
+        */
+       void (*aux_autodpcd)(struct nvkm_i2c *, int aux, bool enable);
 };
 
 void g94_aux_stat(struct nvkm_i2c *, u32 *, u32 *, u32 *, u32 *);
index 2340040942c937c20dddd4bfd26a686df360596a..1115376bc85f5fd5d28e5cb37185721d73cf5e7d 100644 (file)
@@ -22,6 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "priv.h"
+#include <subdev/timer.h>
 
 static void
 gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
@@ -31,7 +32,6 @@ gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
        u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
        u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
        nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
-       nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
 static void
@@ -42,7 +42,6 @@ gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
        u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
        u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
        nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
-       nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
 static void
@@ -53,7 +52,6 @@ gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
        u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
        u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
        nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
-       nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
 void
@@ -90,6 +88,12 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
                        intr1 &= ~stat;
                }
        }
+
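+       /* write command 0x02 and wait up to 2s for the busy bits to clear */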
+       nvkm_mask(device, 0x121c4c, 0x0000003f, 0x00000002);
+       nvkm_msec(device, 2000,
+               if (!(nvkm_rd32(device, 0x121c4c) & 0x0000003f))
+                       break;
+       );
 }
 
 static int
index f3915f85838ed400e4dc3d4a5bc5b36f12b5591f..22e487b493ad1346851a58c02599ceb97011a798 100644 (file)
@@ -22,6 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "priv.h"
+#include <subdev/timer.h>
 
 static void
 gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
@@ -31,7 +32,6 @@ gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
        u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
        u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
        nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
-       nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
 static void
@@ -42,7 +42,6 @@ gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
        u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
        u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
        nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
-       nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
 static void
@@ -53,7 +52,6 @@ gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
        u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
        u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
        nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
-       nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
 void
@@ -90,6 +88,12 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
                        intr1 &= ~stat;
                }
        }
+
+       nvkm_mask(device, 0x12004c, 0x0000003f, 0x00000002);
+       nvkm_msec(device, 2000,
+               if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
+                       break;
+       );
 }
 
 static int
index 2585ef07532ac094a2f888ffb38d503a846572b3..ac2b34e9ac6add2ca24bff686f8799bcf39ff7ac 100644 (file)
@@ -14,3 +14,4 @@ nvkm-y += nvkm/subdev/mc/gk20a.o
 nvkm-y += nvkm/subdev/mc/gp100.o
 nvkm-y += nvkm/subdev/mc/gp10b.o
 nvkm-y += nvkm/subdev/mc/tu102.o
+nvkm-y += nvkm/subdev/mc/ga100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
new file mode 100644 (file)
index 0000000..967eb3a
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+ga100_mc_intr_unarm(struct nvkm_mc *mc)
+{
+       nvkm_wr32(mc->subdev.device, 0xb81610, 0x00000004);
+}
+
+static void
+ga100_mc_intr_rearm(struct nvkm_mc *mc)
+{
+       nvkm_wr32(mc->subdev.device, 0xb81608, 0x00000004);
+}
+
+static void
+ga100_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 intr)
+{
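+       /* separate set/clear enable registers: enable the requested bits, clear the rest of the mask */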
+       nvkm_wr32(mc->subdev.device, 0xb81210,          mask & intr );
+       nvkm_wr32(mc->subdev.device, 0xb81410, mask & ~(mask & intr));
+}
+
+static u32
+ga100_mc_intr_stat(struct nvkm_mc *mc)
+{
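+       /* nvkm_mask() with zero mask/data just reads (and rewrites) the register, returning its value */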
+       u32 intr_top = nvkm_rd32(mc->subdev.device, 0xb81600), intr = 0x00000000;
+       if (intr_top & 0x00000004)
+               intr = nvkm_mask(mc->subdev.device, 0xb81010, 0x00000000, 0x00000000);
+       return intr;
+}
+
+static void
+ga100_mc_init(struct nvkm_mc *mc)
+{
+       nv50_mc_init(mc);
+       nvkm_wr32(mc->subdev.device, 0xb81210, 0xffffffff);
+}
+
+static const struct nvkm_mc_func
+ga100_mc = {
+       .init = ga100_mc_init,
+       .intr = gp100_mc_intr,
+       .intr_unarm = ga100_mc_intr_unarm,
+       .intr_rearm = ga100_mc_intr_rearm,
+       .intr_mask = ga100_mc_intr_mask,
+       .intr_stat = ga100_mc_intr_stat,
+       .reset = gk104_mc_reset,
+};
+
+int
+ga100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+       return nvkm_mc_new_(&ga100_mc, device, index, pmc);
+}
index de91e9a2617258cbcc93e7ea027a9d4a77a1f99f..6d5212ae2fd57b741715e7d7c9e4c83fe5bdd13e 100644 (file)
@@ -316,9 +316,9 @@ nvkm_mmu_vram(struct nvkm_mmu *mmu)
 {
        struct nvkm_device *device = mmu->subdev.device;
        struct nvkm_mm *mm = &device->fb->ram->vram;
-       const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
-       const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
-       const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
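+       /* widen to 64-bit so large heaps aren't truncated on big-VRAM boards */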
+       const u64 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
+       const u64 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
+       const u64 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
        u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
        u8 heap = NVKM_MEM_VRAM;
        int heapM, heapN, heapU;
index d59ef6e92a40276abc69e51adbfcaa0ab0f08869..23195d5d4e9191bce05d2eeeded4ed1196f6684b 100644 (file)
@@ -730,9 +730,6 @@ int radeon_ttm_init(struct radeon_device *rdev)
        }
        rdev->mman.initialized = true;
 
-       ttm_pool_init(&rdev->mman.bdev.pool, rdev->dev, rdev->need_swiotlb,
-                     dma_addressing_limited(&rdev->pdev->dev));
-
        r = radeon_ttm_init_vram(rdev);
        if (r) {
                DRM_ERROR("Failed initializing VRAM heap.\n");
index 7b2f60616750d1f69b59c87bb06bd3a764709663..8cd776adc592be46dff106dc8fe25b1219015502 100644 (file)
@@ -66,7 +66,7 @@ static struct ttm_pool_type global_uncached[MAX_ORDER];
 static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
 static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
 
-static spinlock_t shrinker_lock;
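+/* page freeing (e.g. set_pages_wb()) may sleep, so this can't be a spinlock */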
+static struct mutex shrinker_lock;
 static struct list_head shrinker_list;
 static struct shrinker mm_shrinker;
 
@@ -190,7 +190,7 @@ static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
                size_t size = (1ULL << order) * PAGE_SIZE;
 
                addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
-               if (dma_mapping_error(pool->dev, **dma_addr))
+               if (dma_mapping_error(pool->dev, addr))
                        return -EFAULT;
        }
 
@@ -249,9 +249,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
        spin_lock_init(&pt->lock);
        INIT_LIST_HEAD(&pt->pages);
 
-       spin_lock(&shrinker_lock);
+       mutex_lock(&shrinker_lock);
        list_add_tail(&pt->shrinker_list, &shrinker_list);
-       spin_unlock(&shrinker_lock);
+       mutex_unlock(&shrinker_lock);
 }
 
 /* Remove a pool_type from the global shrinker list and free all pages */
@@ -259,9 +259,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
 {
        struct page *p, *tmp;
 
-       spin_lock(&shrinker_lock);
+       mutex_lock(&shrinker_lock);
        list_del(&pt->shrinker_list);
-       spin_unlock(&shrinker_lock);
+       mutex_unlock(&shrinker_lock);
 
        list_for_each_entry_safe(p, tmp, &pt->pages, lru)
                ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
@@ -302,7 +302,7 @@ static unsigned int ttm_pool_shrink(void)
        unsigned int num_freed;
        struct page *p;
 
-       spin_lock(&shrinker_lock);
+       mutex_lock(&shrinker_lock);
        pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
 
        p = ttm_pool_type_take(pt);
@@ -314,7 +314,7 @@ static unsigned int ttm_pool_shrink(void)
        }
 
        list_move_tail(&pt->shrinker_list, &shrinker_list);
-       spin_unlock(&shrinker_lock);
+       mutex_unlock(&shrinker_lock);
 
        return num_freed;
 }
@@ -507,7 +507,6 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
                        ttm_pool_type_init(&pool->caching[i].orders[j],
                                           pool, i, j);
 }
-EXPORT_SYMBOL(ttm_pool_init);
 
 /**
  * ttm_pool_fini - Cleanup a pool
@@ -525,7 +524,6 @@ void ttm_pool_fini(struct ttm_pool *pool)
                for (j = 0; j < MAX_ORDER; ++j)
                        ttm_pool_type_fini(&pool->caching[i].orders[j]);
 }
-EXPORT_SYMBOL(ttm_pool_fini);
 
 #ifdef CONFIG_DEBUG_FS
 /* Count the number of pages available in a pool_type */
@@ -566,7 +564,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
 {
        unsigned int i;
 
-       spin_lock(&shrinker_lock);
+       mutex_lock(&shrinker_lock);
 
        seq_puts(m, "\t ");
        for (i = 0; i < MAX_ORDER; ++i)
@@ -602,7 +600,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
        seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
                   atomic_long_read(&allocated_pages), page_pool_size);
 
-       spin_unlock(&shrinker_lock);
+       mutex_unlock(&shrinker_lock);
 
        return 0;
 }
@@ -646,7 +644,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
        if (!page_pool_size)
                page_pool_size = num_pages;
 
-       spin_lock_init(&shrinker_lock);
+       mutex_init(&shrinker_lock);
        INIT_LIST_HEAD(&shrinker_list);
 
        for (i = 0; i < MAX_ORDER; ++i) {
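
The lock conversion above is not cosmetic: ttm_pool_shrink() releases pool pages while holding shrinker_lock, and freeing a pooled page can sleep (its DMA mapping may need to be torn down), which is not allowed under a spinlock. A minimal sketch of the round-robin shrinking pattern the code implements, using hypothetical demo_* names rather than the driver's:

	struct demo_pool {
		struct list_head node;
	};

	static DEFINE_MUTEX(demo_lock);
	static LIST_HEAD(demo_list);

	/* Free one unit from the least-recently shrunk pool, then rotate
	 * it to the tail so reclaim pressure is spread across all pools.
	 */
	static unsigned int demo_shrink_one(void)
	{
		struct demo_pool *pool;

		mutex_lock(&demo_lock);		/* sleepable context is fine */
		pool = list_first_entry(&demo_list, struct demo_pool, node);
		/* ... release one page from pool here; this may sleep ... */
		list_move_tail(&pool->node, &demo_list);
		mutex_unlock(&demo_lock);
		return 1;
	}
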
index 7bdda1b5b2217420cc1fe9860b7477b8bfac5749..09fa75a2b289e5e31c457d85fa4c6db90c3e86ed 100644
@@ -899,6 +899,7 @@ config HID_SONY
        depends on NEW_LEDS
        depends on LEDS_CLASS
        select POWER_SUPPLY
+       select CRC32
        help
        Support for
 
index 3d1ccac5d99a34a519dc5148e07780807d4a2b98..2ab38b715347717142c9805a902869c25fc10cf6 100644
@@ -154,7 +154,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
 
        for (i = 0; i < cl_data->num_hid_devices; i++) {
                cl_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8,
-                                                                 &cl_data->sensor_phys_addr[i],
+                                                                 &cl_data->sensor_dma_addr[i],
                                                                  GFP_KERNEL);
                cl_data->sensor_sts[i] = 0;
                cl_data->sensor_requested_cnt[i] = 0;
@@ -187,7 +187,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
                }
                info.period = msecs_to_jiffies(AMD_SFH_IDLE_LOOP);
                info.sensor_idx = cl_idx;
-               info.phys_address = cl_data->sensor_phys_addr[i];
+               info.dma_address = cl_data->sensor_dma_addr[i];
 
                cl_data->report_descr[i] = kzalloc(cl_data->report_descr_sz[i], GFP_KERNEL);
                if (!cl_data->report_descr[i]) {
@@ -212,7 +212,7 @@ cleanup:
                if (cl_data->sensor_virt_addr[i]) {
                        dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
                                          cl_data->sensor_virt_addr[i],
-                                         cl_data->sensor_phys_addr[i]);
+                                         cl_data->sensor_dma_addr[i]);
                }
                kfree(cl_data->feature_report[i]);
                kfree(cl_data->input_report[i]);
@@ -238,7 +238,7 @@ int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
                if (cl_data->sensor_virt_addr[i]) {
                        dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
                                          cl_data->sensor_virt_addr[i],
-                                         cl_data->sensor_phys_addr[i]);
+                                         cl_data->sensor_dma_addr[i]);
                }
        }
        kfree(cl_data);
index 6be0783d885ce5f91e808760dfc9f152c9bcb220..d7eac1728e314add855c917ba5e07edf4e95c443 100644
@@ -27,7 +27,7 @@ struct amdtp_cl_data {
        int hid_descr_size[MAX_HID_DEVICES];
        phys_addr_t phys_addr_base;
        u32 *sensor_virt_addr[MAX_HID_DEVICES];
-       phys_addr_t sensor_phys_addr[MAX_HID_DEVICES];
+       dma_addr_t sensor_dma_addr[MAX_HID_DEVICES];
        u32 sensor_sts[MAX_HID_DEVICES];
        u32 sensor_requested_cnt[MAX_HID_DEVICES];
        u8 report_type[MAX_HID_DEVICES];
index a51c7b76283bb46b156fe405cf487a986d1cdd3e..dbac1664166277ba2117b2dde68677699200cfd6 100644
@@ -41,7 +41,7 @@ void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info i
        cmd_param.s.buf_layout = 1;
        cmd_param.s.buf_length = 16;
 
-       writeq(info.phys_address, privdata->mmio + AMD_C2P_MSG2);
+       writeq(info.dma_address, privdata->mmio + AMD_C2P_MSG2);
        writel(cmd_param.ul, privdata->mmio + AMD_C2P_MSG1);
        writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
 }
index e8be94f935b78f862bd7cb0da3e07613b7e6ae49..8f8d19b2cfe5ba611ac62ea0fdf63327abe74a97 100644
@@ -67,7 +67,7 @@ struct amd_mp2_dev {
 struct amd_mp2_sensor_info {
        u8 sensor_idx;
        u32 period;
-       phys_addr_t phys_address;
+       dma_addr_t dma_address;
 };
 
 void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info);
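
The phys_addr_t to dma_addr_t change in this struct is the heart of the fix: the handle filled in by dma_alloc_coherent() is a bus address for the device, not a CPU physical address, and with an IOMMU in the path the two differ (they may not even share a width). A sketch of the correct pairing, with a made-up helper name:

	#include <linux/dma-mapping.h>

	/* Returns the CPU virtual address; *handle receives the address
	 * the device must use for DMA, which is not a CPU physical address.
	 */
	static void *demo_alloc_shared(struct device *dev, dma_addr_t *handle)
	{
		return dma_alloc_coherent(dev, PAGE_SIZE, handle, GFP_KERNEL);
	}
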
index 4c5f23640f9c7434260346a41d3036d32ddd9da7..5ba0aa1d2335351111836d97520cea279ade0175 100644
 #define USB_DEVICE_ID_TOSHIBA_CLICK_L9W        0x0401
 #define USB_DEVICE_ID_HP_X2            0x074d
 #define USB_DEVICE_ID_HP_X2_10_COVER   0x0755
+#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN   0x2706
 
 #define USB_VENDOR_ID_ELECOM           0x056e
 #define USB_DEVICE_ID_ELECOM_BM084     0x0061
index dc7f6b4a775c982a3f966480526083287d717534..f23027d2795ba0b816aabfb74200b1cfb09f9566 100644
@@ -322,6 +322,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
                USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
          HID_BATTERY_QUIRK_IGNORE },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
+         HID_BATTERY_QUIRK_IGNORE },
        {}
 };
 
index 1ffcfc9a1e033b9015a2c88ce72b2b5710e4ee49..45e7e0bdd382bd736f51fecba28ecd370f5eba2d 100644
@@ -1869,6 +1869,10 @@ static const struct hid_device_id logi_dj_receivers[] = {
          HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
                0xc531),
         .driver_data = recvr_type_gaming_hidpp},
+       { /* Logitech G602 receiver (0xc537) */
+         HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+               0xc537),
+        .driver_data = recvr_type_gaming_hidpp},
        { /* Logitech lightspeed receiver (0xc539) */
          HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
                USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1),
index f85781464807d130b41797b2b57274438a7e4ca9..7eb9a6ddb46a67e7d06866b923f0bf0994c7b1b9 100644
@@ -4053,6 +4053,8 @@ static const struct hid_device_id hidpp_devices[] = {
        { /* MX Master mouse over Bluetooth */
          HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012),
          .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { /* MX Ergo trackball over Bluetooth */
+         HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e),
          .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
        { /* MX Master 3 mouse over Bluetooth */
index d670bcd57bdef88870661a81fd9179d72a3077cd..0743ef51d3b246ab2086ca3dda387a842b79e19e 100644
@@ -2054,6 +2054,10 @@ static const struct hid_device_id mt_devices[] = {
                HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
                        USB_VENDOR_ID_SYNAPTICS, 0xce08) },
 
+       { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+               HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+                       USB_VENDOR_ID_SYNAPTICS, 0xce09) },
+
        /* TopSeed panels */
        { .driver_data = MT_CLS_TOPSEED,
                MT_USB_DEVICE(USB_VENDOR_ID_TOPSEED2,
index d26d8cd98efcfddc869155a07779828c54eee6bc..56406cee401fffcfc4b63f8cd6dc926b7b93dfbb 100644
@@ -90,7 +90,7 @@ static int uclogic_params_get_str_desc(__u8 **pbuf, struct hid_device *hdev,
                goto cleanup;
        } else if (rc < 0) {
                hid_err(hdev,
-                       "failed retrieving string descriptor #%hhu: %d\n",
+                       "failed retrieving string descriptor #%u: %d\n",
                        idx, rc);
                goto cleanup;
        }
index 41012681cafd1ec469d9bfb19930cf1d4a5c6d00..4399d6c6afef2c2bdb22eb013e4b2ac1a729284e 100644
@@ -1482,7 +1482,7 @@ static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
                wdata->state.cmd_err = err;
                wiimote_cmd_complete(wdata);
        } else if (err) {
-               hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err,
+               hid_warn(wdata->hdev, "Remote error %u on req %u\n", err,
                                                                        cmd);
        }
 }
index 045c464228d91f6a9bb8cba52fc5360ec7331013..e8acd235db2a90f156ec516d37990921a759e0db 100644
@@ -1270,6 +1270,37 @@ static int wacom_devm_sysfs_create_group(struct wacom *wacom,
                                               group);
 }
 
+static void wacom_devm_kfifo_release(struct device *dev, void *res)
+{
+       struct kfifo_rec_ptr_2 *devres = res;
+
+       kfifo_free(devres);
+}
+
+static int wacom_devm_kfifo_alloc(struct wacom *wacom)
+{
+       struct kfifo_rec_ptr_2 *pen_fifo;
+       int error;
+
+       pen_fifo = devres_alloc(wacom_devm_kfifo_release,
+                             sizeof(struct kfifo_rec_ptr_2),
+                             GFP_KERNEL);
+
+       if (!pen_fifo)
+               return -ENOMEM;
+
+       error = kfifo_alloc(pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+       if (error) {
+               devres_free(pen_fifo);
+               return error;
+       }
+
+       devres_add(&wacom->hdev->dev, pen_fifo);
+
+       return 0;
+}
+
 enum led_brightness wacom_leds_brightness_get(struct wacom_led *led)
 {
        struct wacom *wacom = led->wacom;
@@ -2724,7 +2755,7 @@ static int wacom_probe(struct hid_device *hdev,
        if (features->check_for_hid_type && features->hid_type != hdev->type)
                return -ENODEV;
 
-       error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+       error = wacom_devm_kfifo_alloc(wacom);
        if (error)
                return error;
 
@@ -2786,8 +2817,6 @@ static void wacom_remove(struct hid_device *hdev)
 
        if (wacom->wacom_wac.features.type != REMOTE)
                wacom_release_resources(wacom);
-
-       kfifo_free(&wacom_wac->pen_fifo);
 }
 
 #ifdef CONFIG_PM
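
The devres conversion is what lets wacom_remove() drop the explicit kfifo_free(): once devres_add() has attached the fifo to the device, the release callback runs automatically when the driver detaches, including on probe error paths, which is where the original leak came from. The general shape of the idiom, with hypothetical demo_* names:

	static void demo_release(struct device *dev, void *res)
	{
		demo_fini(res);			/* assumed cleanup helper */
	}

	static int demo_devm_setup(struct device *dev)
	{
		struct demo_res *r;
		int err;

		r = devres_alloc(demo_release, sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;

		err = demo_init(r);		/* assumed init helper */
		if (err) {
			devres_free(r);		/* release is not invoked */
			return err;
		}

		devres_add(dev, r);		/* freed on device detach */
		return 0;
	}
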
index 502f8cd95f6d46cb6db5923ba877612f886d4a7a..d491fdcee61f025e37c0a3551c29b555ce5a2b87 100644
@@ -2550,7 +2550,6 @@ static void hv_kexec_handler(void)
        /* Make sure conn_state is set as hv_synic_cleanup checks for it */
        mb();
        cpuhp_remove_state(hyperv_cpuhp_online);
-       hyperv_cleanup();
 };
 
 static void hv_crash_handler(struct pt_regs *regs)
@@ -2566,7 +2565,6 @@ static void hv_crash_handler(struct pt_regs *regs)
        cpu = smp_processor_id();
        hv_stimer_cleanup(cpu);
        hv_synic_disable_regs(cpu);
-       hyperv_cleanup();
 };
 
 static int hv_synic_suspend(void)
index 9b306448b7a0f57ae45cacd4090ef4aaa7a1e2f5..822c2e74b98d4713927136d3e2344bb96244f7fb 100644
@@ -222,7 +222,7 @@ static int amd_create_sensor(struct device *dev,
         */
        cpus = num_present_cpus() / num_siblings;
 
-       s_config = devm_kcalloc(dev, cpus + sockets,
+       s_config = devm_kcalloc(dev, cpus + sockets + 1,
                                sizeof(u32), GFP_KERNEL);
        if (!s_config)
                return -ENOMEM;
@@ -254,6 +254,7 @@ static int amd_create_sensor(struct device *dev,
                        scnprintf(label_l[i], 10, "Esocket%u", (i - cpus));
        }
 
+       s_config[i] = 0;
        return 0;
 }
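
Both halves of this fix serve one contract: the sensor config array handed to the hwmon core is zero-terminated, so it needs one slot beyond the last channel plus an explicit terminator. Reduced sketch with hypothetical names (devm_kcalloc() already zeroes the memory; the final store just documents the contract):

	u32 *cfg = devm_kcalloc(dev, n_channels + 1, sizeof(u32), GFP_KERNEL);

	if (!cfg)
		return -ENOMEM;
	/* ... fill cfg[0] .. cfg[n_channels - 1] ... */
	cfg[n_channels] = 0;	/* terminator the consumer scans for */
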
 
index 777439f48c1471a1cbd02d5b32208367ddf44b2d..111a91dc6b798e31ae6ff9998e4eea0428943bc5 100644
@@ -334,8 +334,18 @@ static int pwm_fan_probe(struct platform_device *pdev)
 
        ctx->pwm_value = MAX_PWM;
 
-       /* Set duty cycle to maximum allowed and enable PWM output */
        pwm_init_state(ctx->pwm, &state);
+       /*
+        * __set_pwm assumes that MAX_PWM * (period - 1) fits into an unsigned
+        * long. Check this here to prevent the fan from running at too low
+        * a frequency.
+        */
+       if (state.period > ULONG_MAX / MAX_PWM + 1) {
+               dev_err(dev, "Configured period too big\n");
+               return -EINVAL;
+       }
+
+       /* Set duty cycle to maximum allowed and enable PWM output */
        state.duty_cycle = ctx->pwm->args.period - 1;
        state.enabled = true;
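
The bound in the new check falls straight out of the multiplication it guards; rearranging (MAX_PWM is nonzero):

	MAX_PWM * (period - 1) <= ULONG_MAX
	         period - 1    <= ULONG_MAX / MAX_PWM
	         period        <= ULONG_MAX / MAX_PWM + 1

so any period strictly greater than ULONG_MAX / MAX_PWM + 1 is refused with -EINVAL before __set_pwm() can overflow.
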
 
index ae90713443fa61aa1644fcb80412a500c23ed2c1..877fe3733a42b211f7fd039a305fa39ef0e270dd 100644
@@ -1449,7 +1449,7 @@ static int i801_add_mux(struct i801_priv *priv)
 
        /* Register GPIO descriptor lookup table */
        lookup = devm_kzalloc(dev,
-                             struct_size(lookup, table, mux_config->n_gpios),
+                             struct_size(lookup, table, mux_config->n_gpios + 1),
                              GFP_KERNEL);
        if (!lookup)
                return -ENOMEM;
index 33de99b7bc20c0ea215991c43efa1f89529e474e..0818d3e50734771b854b875b78bcc60c37ded121 100644
@@ -38,6 +38,7 @@
 #define I2C_IO_CONFIG_OPEN_DRAIN       0x0003
 #define I2C_IO_CONFIG_PUSH_PULL                0x0000
 #define I2C_SOFT_RST                   0x0001
+#define I2C_HANDSHAKE_RST              0x0020
 #define I2C_FIFO_ADDR_CLR              0x0001
 #define I2C_DELAY_LEN                  0x0002
 #define I2C_TIME_CLR_VALUE             0x0000
@@ -45,6 +46,7 @@
 #define I2C_WRRD_TRANAC_VALUE          0x0002
 #define I2C_RD_TRANAC_VALUE            0x0001
 #define I2C_SCL_MIS_COMP_VALUE         0x0000
+#define I2C_CHN_CLR_FLAG               0x0000
 
 #define I2C_DMA_CON_TX                 0x0000
 #define I2C_DMA_CON_RX                 0x0001
@@ -54,7 +56,9 @@
 #define I2C_DMA_START_EN               0x0001
 #define I2C_DMA_INT_FLAG_NONE          0x0000
 #define I2C_DMA_CLR_FLAG               0x0000
+#define I2C_DMA_WARM_RST               0x0001
 #define I2C_DMA_HARD_RST               0x0002
+#define I2C_DMA_HANDSHAKE_RST          0x0004
 
 #define MAX_SAMPLE_CNT_DIV             8
 #define MAX_STEP_CNT_DIV               64
@@ -475,11 +479,24 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
 {
        u16 control_reg;
 
-       writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
-       udelay(50);
-       writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
-
-       mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+       if (i2c->dev_comp->dma_sync) {
+               writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
+               udelay(10);
+               writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+               udelay(10);
+               writel(I2C_DMA_HANDSHAKE_RST | I2C_DMA_HARD_RST,
+                      i2c->pdmabase + OFFSET_RST);
+               mtk_i2c_writew(i2c, I2C_HANDSHAKE_RST | I2C_SOFT_RST,
+                              OFFSET_SOFTRESET);
+               udelay(10);
+               writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+               mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_SOFTRESET);
+       } else {
+               writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
+               udelay(50);
+               writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+               mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+       }
 
        /* Set ioconfig */
        if (i2c->use_push_pull)
index 19cda6742423d98ab3d53a897d1897ac6e93eae4..2917fecf6c80d08b118cd0c1ec70da3e0483ecd1 100644
@@ -72,6 +72,8 @@
 
 /* timeout (ms) for pm runtime autosuspend */
 #define SPRD_I2C_PM_TIMEOUT    1000
+/* timeout (ms) for transfer message */
+#define I2C_XFER_TIMEOUT       1000
 
 /* SPRD i2c data structure */
 struct sprd_i2c {
@@ -244,6 +246,7 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
                               struct i2c_msg *msg, bool is_last_msg)
 {
        struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
+       unsigned long time_left;
 
        i2c_dev->msg = msg;
        i2c_dev->buf = msg->buf;
@@ -273,7 +276,10 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
 
        sprd_i2c_opt_start(i2c_dev);
 
-       wait_for_completion(&i2c_dev->complete);
+       time_left = wait_for_completion_timeout(&i2c_dev->complete,
+                               msecs_to_jiffies(I2C_XFER_TIMEOUT));
+       if (!time_left)
+               return -ETIMEDOUT;
 
        return i2c_dev->err;
 }
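
The conversion leans on wait_for_completion_timeout()'s return convention: 0 if the timeout elapsed, otherwise the number of jiffies remaining. The generic shape, with a placeholder completion:

	unsigned long left;

	left = wait_for_completion_timeout(&done, msecs_to_jiffies(1000));
	if (!left)
		return -ETIMEDOUT;	/* hardware never signalled */
	/* completed in time; 'left' jiffies of the budget were unused */
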
index 2162bc80f09e02ff2daa35362436de1dbb802217..013ad33fbbc81ee507148df1aeffde66613957d2 100644
@@ -223,7 +223,6 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
        sense_rq->rq_disk = rq->rq_disk;
        sense_rq->cmd_flags = REQ_OP_DRV_IN;
        ide_req(sense_rq)->type = ATA_PRIV_SENSE;
-       sense_rq->rq_flags |= RQF_PREEMPT;
 
        req->cmd[0] = GPCMD_REQUEST_SENSE;
        req->cmd[4] = cmd_len;
index 1a53c7a752244bf0037acf556471788117a23bfe..4867b67b60d698c464bd9f99f248f2e846b9e524 100644
@@ -515,15 +515,10 @@ repeat:
                 * above to return us whatever is in the queue. Since we call
                 * ide_do_request() ourselves, we end up taking requests while
                 * the queue is blocked...
-                * 
-                * We let requests forced at head of queue with ide-preempt
-                * though. I hope that doesn't happen too much, hopefully not
-                * unless the subdriver triggers such a thing in its own PM
-                * state machine.
                 */
                if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
                    ata_pm_request(rq) == 0 &&
-                   (rq->rq_flags & RQF_PREEMPT) == 0) {
+                   (rq->rq_flags & RQF_PM) == 0) {
                        /* there should be no pending command at this point */
                        ide_unlock_port(hwif);
                        goto plug_device;
index 192e6c65d34e7a0aff073003c2e4039d681f87f2..82ab308f1aafe007b0e2420e0065eeefd19acf6f 100644
@@ -77,7 +77,7 @@ int generic_ide_resume(struct device *dev)
        }
 
        memset(&rqpm, 0, sizeof(rqpm));
-       rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT);
+       rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
        ide_req(rq)->type = ATA_PRIV_PM_RESUME;
        ide_req(rq)->special = &rqpm;
        rqpm.pm_step = IDE_PM_START_RESUME;
index d79335506ecd3c3f5aa3beabde8e0dd74cef10a3..28f93b9aa51bf7933cc92c97b7455dc7ba9338ef 100644
@@ -963,6 +963,39 @@ static struct cpuidle_state dnv_cstates[] __initdata = {
                .enter = NULL }
 };
 
+/*
+ * Note, depending on HW and FW revision, SnowRidge SoC may or may not support
+ * C6, and this is indicated in the CPUID mwait leaf.
+ */
+static struct cpuidle_state snr_cstates[] __initdata = {
+       {
+               .name = "C1",
+               .desc = "MWAIT 0x00",
+               .flags = MWAIT2flg(0x00),
+               .exit_latency = 2,
+               .target_residency = 2,
+               .enter = &intel_idle,
+               .enter_s2idle = intel_idle_s2idle, },
+       {
+               .name = "C1E",
+               .desc = "MWAIT 0x01",
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+               .exit_latency = 15,
+               .target_residency = 25,
+               .enter = &intel_idle,
+               .enter_s2idle = intel_idle_s2idle, },
+       {
+               .name = "C6",
+               .desc = "MWAIT 0x20",
+               .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 130,
+               .target_residency = 500,
+               .enter = &intel_idle,
+               .enter_s2idle = intel_idle_s2idle, },
+       {
+               .enter = NULL }
+};
+
 static const struct idle_cpu idle_cpu_nehalem __initconst = {
        .state_table = nehalem_cstates,
        .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
@@ -1084,6 +1117,12 @@ static const struct idle_cpu idle_cpu_dnv __initconst = {
        .use_acpi = true,
 };
 
+static const struct idle_cpu idle_cpu_snr __initconst = {
+       .state_table = snr_cstates,
+       .disable_promotion_to_c1e = true,
+       .use_acpi = true,
+};
+
 static const struct x86_cpu_id intel_idle_ids[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP,          &idle_cpu_nhx),
        X86_MATCH_INTEL_FAM6_MODEL(NEHALEM,             &idle_cpu_nehalem),
@@ -1122,7 +1161,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,       &idle_cpu_bxt),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS,  &idle_cpu_bxt),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D,     &idle_cpu_dnv),
-       X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,      &idle_cpu_dnv),
+       X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,      &idle_cpu_snr),
        {}
 };
 
index 7f70e5a7de10543318f1afe420193e8f42084c59..97a77ea8d3c9c1048b27dd90b68e3b90b3809c33 100644
@@ -131,8 +131,10 @@ static ssize_t default_roce_mode_store(struct config_item *item,
                return ret;
 
        gid_type = ib_cache_gid_parse_type_str(buf);
-       if (gid_type < 0)
+       if (gid_type < 0) {
+               cma_configfs_params_put(cma_dev);
                return -EINVAL;
+       }
 
        ret = cma_set_default_gid_type(cma_dev, group->port_num, gid_type);
 
index e0a41c8670023a4ef2df594248353fbfef4e28cb..ff1551b3cf619862d66077b8a7b6f12d3175147b 100644
@@ -254,6 +254,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
        } else {
                ret = xa_alloc_cyclic(&rt->xa, &res->id, res, xa_limit_32b,
                                      &rt->next_id, GFP_KERNEL);
+               ret = (ret < 0) ? ret : 0;
        }
 
 out:
index 7dab9a27a145a8eee04150c3caa11f2988bae584..da2512c30ffd5276c02e651638c3db89d403287f 100644
@@ -95,8 +95,6 @@ struct ucma_context {
        u64                     uid;
 
        struct list_head        list;
-       /* sync between removal event and id destroy, protected by file mut */
-       int                     destroying;
        struct work_struct      close_work;
 };
 
@@ -122,7 +120,7 @@ static DEFINE_XARRAY_ALLOC(ctx_table);
 static DEFINE_XARRAY_ALLOC(multicast_table);
 
 static const struct file_operations ucma_fops;
-static int __destroy_id(struct ucma_context *ctx);
+static int ucma_destroy_private_ctx(struct ucma_context *ctx);
 
 static inline struct ucma_context *_ucma_find_context(int id,
                                                      struct ucma_file *file)
@@ -179,19 +177,14 @@ static void ucma_close_id(struct work_struct *work)
 
        /* once all inflight tasks are finished, we close all underlying
         * resources. The context is still alive until it is explicitly destroyed
-        * by its creator.
+        * by its creator. This puts back the xarray's reference.
         */
        ucma_put_ctx(ctx);
        wait_for_completion(&ctx->comp);
        /* No new events will be generated after destroying the id. */
        rdma_destroy_id(ctx->cm_id);
 
-       /*
-        * At this point ctx->ref is zero so the only place the ctx can be is in
-        * a uevent or in __destroy_id(). Since the former doesn't touch
-        * ctx->cm_id and the latter sync cancels this, there is no races with
-        * this store.
-        */
+       /* Reading the cm_id without holding a positive ref is not allowed */
        ctx->cm_id = NULL;
 }
 
@@ -204,7 +197,6 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
                return NULL;
 
        INIT_WORK(&ctx->close_work, ucma_close_id);
-       refcount_set(&ctx->ref, 1);
        init_completion(&ctx->comp);
        /* So list_del() will work if we don't do ucma_finish_ctx() */
        INIT_LIST_HEAD(&ctx->list);
@@ -218,6 +210,13 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
        return ctx;
 }
 
+static void ucma_set_ctx_cm_id(struct ucma_context *ctx,
+                              struct rdma_cm_id *cm_id)
+{
+       refcount_set(&ctx->ref, 1);
+       ctx->cm_id = cm_id;
+}
+
 static void ucma_finish_ctx(struct ucma_context *ctx)
 {
        lockdep_assert_held(&ctx->file->mut);
@@ -303,7 +302,7 @@ static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
        ctx = ucma_alloc_ctx(listen_ctx->file);
        if (!ctx)
                goto err_backlog;
-       ctx->cm_id = cm_id;
+       ucma_set_ctx_cm_id(ctx, cm_id);
 
        uevent = ucma_create_uevent(listen_ctx, event);
        if (!uevent)
@@ -321,8 +320,7 @@ static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
        return 0;
 
 err_alloc:
-       xa_erase(&ctx_table, ctx->id);
-       kfree(ctx);
+       ucma_destroy_private_ctx(ctx);
 err_backlog:
        atomic_inc(&listen_ctx->backlog);
        /* Returning error causes the new ID to be destroyed */
@@ -356,8 +354,12 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
                wake_up_interruptible(&ctx->file->poll_wait);
        }
 
-       if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL && !ctx->destroying)
-               queue_work(system_unbound_wq, &ctx->close_work);
+       if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
+               xa_lock(&ctx_table);
+               if (xa_load(&ctx_table, ctx->id) == ctx)
+                       queue_work(system_unbound_wq, &ctx->close_work);
+               xa_unlock(&ctx_table);
+       }
        return 0;
 }
 
@@ -461,13 +463,12 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
                ret = PTR_ERR(cm_id);
                goto err1;
        }
-       ctx->cm_id = cm_id;
+       ucma_set_ctx_cm_id(ctx, cm_id);
 
        resp.id = ctx->id;
        if (copy_to_user(u64_to_user_ptr(cmd.response),
                         &resp, sizeof(resp))) {
-               xa_erase(&ctx_table, ctx->id);
-               __destroy_id(ctx);
+               ucma_destroy_private_ctx(ctx);
                return -EFAULT;
        }
 
@@ -477,8 +478,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
        return 0;
 
 err1:
-       xa_erase(&ctx_table, ctx->id);
-       kfree(ctx);
+       ucma_destroy_private_ctx(ctx);
        return ret;
 }
 
@@ -516,68 +516,73 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
        rdma_unlock_handler(mc->ctx->cm_id);
 }
 
-/*
- * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
- * this point, no new events will be reported from the hardware. However, we
- * still need to cleanup the UCMA context for this ID. Specifically, there
- * might be events that have not yet been consumed by the user space software.
- * mutex. After that we release them as needed.
- */
-static int ucma_free_ctx(struct ucma_context *ctx)
+static int ucma_cleanup_ctx_events(struct ucma_context *ctx)
 {
        int events_reported;
        struct ucma_event *uevent, *tmp;
        LIST_HEAD(list);
 
-       ucma_cleanup_multicast(ctx);
-
-       /* Cleanup events not yet reported to the user. */
+       /* Cleanup events not yet reported to the user.*/
        mutex_lock(&ctx->file->mut);
        list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
-               if (uevent->ctx == ctx || uevent->conn_req_ctx == ctx)
+               if (uevent->ctx != ctx)
+                       continue;
+
+               if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST &&
+                   xa_cmpxchg(&ctx_table, uevent->conn_req_ctx->id,
+                              uevent->conn_req_ctx, XA_ZERO_ENTRY,
+                              GFP_KERNEL) == uevent->conn_req_ctx) {
                        list_move_tail(&uevent->list, &list);
+                       continue;
+               }
+               list_del(&uevent->list);
+               kfree(uevent);
        }
        list_del(&ctx->list);
        events_reported = ctx->events_reported;
        mutex_unlock(&ctx->file->mut);
 
        /*
-        * If this was a listening ID then any connections spawned from it
-        * that have not been delivered to userspace are cleaned up too.
-        * Must be done outside any locks.
+        * If this was a listening ID then any connections spawned from it that
+        * have not been delivered to userspace are cleaned up too. Must be done
+        * outside any locks.
         */
        list_for_each_entry_safe(uevent, tmp, &list, list) {
-               list_del(&uevent->list);
-               if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST &&
-                   uevent->conn_req_ctx != ctx)
-                       __destroy_id(uevent->conn_req_ctx);
+               ucma_destroy_private_ctx(uevent->conn_req_ctx);
                kfree(uevent);
        }
-
-       mutex_destroy(&ctx->mutex);
-       kfree(ctx);
        return events_reported;
 }
 
-static int __destroy_id(struct ucma_context *ctx)
+/*
+ * When this is called the xarray must have an XA_ZERO_ENTRY in the ctx->id
+ * (i.e. the ctx is not public to the user). This is either because:
+ *  - ucma_finish_ctx() hasn't been called
+ *  - xa_cmpxchg() succeeded in removing the entry (only one thread can succeed)
+ */
+static int ucma_destroy_private_ctx(struct ucma_context *ctx)
 {
+       int events_reported;
+
        /*
-        * If the refcount is already 0 then ucma_close_id() has already
-        * destroyed the cm_id, otherwise holding the refcount keeps cm_id
-        * valid. Prevent queue_work() from being called.
+        * Destroy the underlying cm_id. New work queuing is prevented now by
+        * the removal from the xarray. Once the work is canceled, ref will either
+        * be 0 because the work ran to completion and consumed the ref from the
+        * xarray, or it will be positive because we still have the ref from the
+        * xarray. This can also be 0 in cases where cm_id was never set.
         */
-       if (refcount_inc_not_zero(&ctx->ref)) {
-               rdma_lock_handler(ctx->cm_id);
-               ctx->destroying = 1;
-               rdma_unlock_handler(ctx->cm_id);
-               ucma_put_ctx(ctx);
-       }
-
        cancel_work_sync(&ctx->close_work);
-       /* At this point it's guaranteed that there is no inflight closing task */
-       if (ctx->cm_id)
+       if (refcount_read(&ctx->ref))
                ucma_close_id(&ctx->close_work);
-       return ucma_free_ctx(ctx);
+
+       events_reported = ucma_cleanup_ctx_events(ctx);
+       ucma_cleanup_multicast(ctx);
+
+       WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, XA_ZERO_ENTRY, NULL,
+                          GFP_KERNEL) != NULL);
+       mutex_destroy(&ctx->mutex);
+       kfree(ctx);
+       return events_reported;
 }
 
 static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
@@ -596,14 +601,17 @@ static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
 
        xa_lock(&ctx_table);
        ctx = _ucma_find_context(cmd.id, file);
-       if (!IS_ERR(ctx))
-               __xa_erase(&ctx_table, ctx->id);
+       if (!IS_ERR(ctx)) {
+               if (__xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
+                                GFP_KERNEL) != ctx)
+                       ctx = ERR_PTR(-ENOENT);
+       }
        xa_unlock(&ctx_table);
 
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
-       resp.events_reported = __destroy_id(ctx);
+       resp.events_reported = ucma_destroy_private_ctx(ctx);
        if (copy_to_user(u64_to_user_ptr(cmd.response),
                         &resp, sizeof(resp)))
                ret = -EFAULT;
@@ -1777,15 +1785,16 @@ static int ucma_close(struct inode *inode, struct file *filp)
         * prevented by this being a FD release function. The list_add_tail() in
         * ucma_connect_event_handler() can run concurrently, however it only
         * adds to the list *after* a listening ID. By only reading the first of
-        * the list, and relying on __destroy_id() to block
+        * the list, and relying on ucma_destroy_private_ctx() to block
         * ucma_connect_event_handler(), no additional locking is needed.
         */
        while (!list_empty(&file->ctx_list)) {
                struct ucma_context *ctx = list_first_entry(
                        &file->ctx_list, struct ucma_context, list);
 
-               xa_erase(&ctx_table, ctx->id);
-               __destroy_id(ctx);
+               WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
+                                  GFP_KERNEL) != ctx);
+               ucma_destroy_private_ctx(ctx);
        }
        kfree(file);
        return 0;
index 7ca4112e3e8f7ff001c154225743391be9f9fb8a..917338db7ac13e8d6582c5ba84f00b6b85b369b4 100644
@@ -135,7 +135,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
         */
        if (mask)
                pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0);
-       return rounddown_pow_of_two(pgsz_bitmap);
+       return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0;
 }
 EXPORT_SYMBOL(ib_umem_find_best_pgsz);
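
The added ternary matters because pgsz_bitmap can be reduced to 0 by the GENMASK() filtering above, and rounddown_pow_of_two(0) is undefined (it takes ilog2 of zero). Returning 0 gives callers an unambiguous "no supported page size" answer; an equivalent spelling of the same guard:

	if (!pgsz_bitmap)
		return 0;	/* no page size fits this umem */
	return rounddown_pow_of_two(pgsz_bitmap);
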
 
index 3bae9ba0ead8518eba40570130412955d2c19f92..d26f3f3e0462a03e8cc38058124add5bcb0f86d8 100644
@@ -3956,7 +3956,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 
        err = set_has_smi_cap(dev);
        if (err)
-               return err;
+               goto err_mp;
 
        if (!mlx5_core_mp_enabled(mdev)) {
                for (i = 1; i <= dev->num_ports; i++) {
@@ -4319,7 +4319,7 @@ static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
 
        err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
        if (err)
-               mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+               mlx5_free_bfreg(dev->mdev, &dev->bfreg);
 
        return err;
 }
index bc98bd950d99fadb0a24e2b9979be7fa1369b77e..3acb5c10b1553f03ab96a3336d9c0318e9edd966 100644
@@ -434,9 +434,9 @@ static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
                pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
                       __func__, dev->id, pd->id);
        }
-       kfree(uctx->cntxt_pd);
        uctx->cntxt_pd = NULL;
        _ocrdma_dealloc_pd(dev, pd);
+       kfree(pd);
 }
 
 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
index 38a37770c01627cd3baaf9e9a270a491b2a5655e..3705c6b8b2237105997082ee0173e1130edae9fa 100644
@@ -214,6 +214,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
 
                }
                usnic_uiom_free_dev_list(dev_list);
+               dev_list = NULL;
        }
 
        /* Try to find resources on an unused vf */
@@ -239,6 +240,8 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
 qp_grp_check:
        if (IS_ERR_OR_NULL(qp_grp)) {
                usnic_err("Failed to allocate qp_grp\n");
+               if (usnic_ib_share_vf)
+                       usnic_uiom_free_dev_list(dev_list);
                return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
        }
        return qp_grp;
index 41dba7090c2ae9fafa91dd2372f43458b0e318d9..c770951a909c905475ae1d81d1aa3150806d3cd5 100644
@@ -96,9 +96,10 @@ static int imx_icc_node_init_qos(struct icc_provider *provider,
                        return -ENODEV;
                }
                /* Allow scaling to be disabled on a per-node basis */
-               if (!dn || !of_device_is_available(dn)) {
+               if (!of_device_is_available(dn)) {
                        dev_warn(dev, "Missing property %s, skip scaling %s\n",
                                 adj->phandle_name, node->name);
+                       of_node_put(dn);
                        return 0;
                }
 
index ba43a15aefec0d6ad6dd7753b193d351b4724d49..d7768d3c6d8aa18dbf251e0117ac16eeebd0d4d7 100644
@@ -7,6 +7,7 @@
 
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/interconnect-provider.h>
 #include <dt-bindings/interconnect/imx8mq.h>
 
 #include "imx.h"
@@ -94,6 +95,7 @@ static struct platform_driver imx8mq_icc_driver = {
        .remove = imx8mq_icc_remove,
        .driver = {
                .name = "imx8mq-interconnect",
+               .sync_state = icc_sync_state,
        },
 };
 
index a8f93ba265f8106bb050dd6b41f74727d4ca0f84..b3fb5b02bcf1ea8b28882f26e9ef79aad18bf121 100644
@@ -42,13 +42,23 @@ config INTERCONNECT_QCOM_QCS404
          This is a driver for the Qualcomm Network-on-Chip on qcs404-based
          platforms.
 
+config INTERCONNECT_QCOM_RPMH_POSSIBLE
+       tristate
+       default INTERCONNECT_QCOM
+       depends on QCOM_RPMH || (COMPILE_TEST && !QCOM_RPMH)
+       depends on QCOM_COMMAND_DB || (COMPILE_TEST && !QCOM_COMMAND_DB)
+       depends on OF || COMPILE_TEST
+       help
+         Compile-testing RPMH drivers is possible on other platforms,
+         but in order to avoid link failures, drivers must not be built-in
+         when QCOM_RPMH or QCOM_COMMAND_DB are loadable modules.
+
 config INTERCONNECT_QCOM_RPMH
        tristate
 
 config INTERCONNECT_QCOM_SC7180
        tristate "Qualcomm SC7180 interconnect driver"
-       depends on INTERCONNECT_QCOM
-       depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+       depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
        select INTERCONNECT_QCOM_RPMH
        select INTERCONNECT_QCOM_BCM_VOTER
        help
@@ -57,8 +67,7 @@ config INTERCONNECT_QCOM_SC7180
 
 config INTERCONNECT_QCOM_SDM845
        tristate "Qualcomm SDM845 interconnect driver"
-       depends on INTERCONNECT_QCOM
-       depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+       depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
        select INTERCONNECT_QCOM_RPMH
        select INTERCONNECT_QCOM_BCM_VOTER
        help
@@ -67,8 +76,7 @@ config INTERCONNECT_QCOM_SDM845
 
 config INTERCONNECT_QCOM_SM8150
        tristate "Qualcomm SM8150 interconnect driver"
-       depends on INTERCONNECT_QCOM
-       depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+       depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
        select INTERCONNECT_QCOM_RPMH
        select INTERCONNECT_QCOM_BCM_VOTER
        help
@@ -77,8 +85,7 @@ config INTERCONNECT_QCOM_SM8150
 
 config INTERCONNECT_QCOM_SM8250
        tristate "Qualcomm SM8250 interconnect driver"
-       depends on INTERCONNECT_QCOM
-       depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+       depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
        select INTERCONNECT_QCOM_RPMH
        select INTERCONNECT_QCOM_BCM_VOTER
        help
index f54cd79b43e401703813810baaac260c922cf278..6a1f7048dacc6642ddca950bbdd497eafb9463e6 100644
@@ -1973,8 +1973,6 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
                return r;
        }
 
-       iommu->int_enabled = true;
-
        return 0;
 }
 
@@ -2169,6 +2167,7 @@ static int iommu_init_irq(struct amd_iommu *iommu)
        if (ret)
                return ret;
 
+       iommu->int_enabled = true;
 enable_faults:
        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
 
index 7e2c445a1faec2a1c7179f4605290ff2c9160a14..f0adbc48fd17954085033bec1e53c740c3040066 100644
@@ -3854,6 +3854,9 @@ static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
        struct amd_iommu *iommu;
        int devid = -1;
 
+       if (!amd_iommu_irq_remap)
+               return 0;
+
        if (x86_fwspec_is_ioapic(fwspec))
                devid = get_ioapic_devid(fwspec->param[0]);
        else if (x86_fwspec_is_hpet(fwspec))
index 5dff7ffbef119ec1ae3c5ec6c6f0ad8a6b42eac9..bcda17012aee8fa433cbc3f31896613e3e6e2606 100644
@@ -196,6 +196,8 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
 
                set_bit(qsmmu->bypass_cbndx, smmu->context_map);
 
+               arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);
+
                reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
                arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
        }
@@ -323,7 +325,9 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
 }
 
 static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
+       { .compatible = "qcom,msm8998-smmu-v2" },
        { .compatible = "qcom,sc7180-smmu-500" },
+       { .compatible = "qcom,sdm630-smmu-v2" },
        { .compatible = "qcom,sdm845-smmu-500" },
        { .compatible = "qcom,sm8150-smmu-500" },
        { .compatible = "qcom,sm8250-smmu-500" },
index f0305e6aac1b87fa3ca15ce7871ad7f943614131..4078358ed66ea86ddf61d24b982c159b35087d3a 100644
@@ -863,33 +863,6 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
        unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
        int i, count = 0;
 
-       /*
-        * The Intel graphic driver is used to assume that the returned
-        * sg list is not combound. This blocks the efforts of converting
-        * Intel IOMMU driver to dma-iommu api's. Add this quirk to make the
-        * device driver work and should be removed once it's fixed in i915
-        * driver.
-        */
-       if (IS_ENABLED(CONFIG_DRM_I915) && dev_is_pci(dev) &&
-           to_pci_dev(dev)->vendor == PCI_VENDOR_ID_INTEL &&
-           (to_pci_dev(dev)->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
-               for_each_sg(sg, s, nents, i) {
-                       unsigned int s_iova_off = sg_dma_address(s);
-                       unsigned int s_length = sg_dma_len(s);
-                       unsigned int s_iova_len = s->length;
-
-                       s->offset += s_iova_off;
-                       s->length = s_length;
-                       sg_dma_address(s) = dma_addr + s_iova_off;
-                       sg_dma_len(s) = s_length;
-                       dma_addr += s_iova_len;
-
-                       pr_info_once("sg combining disabled due to i915 driver\n");
-               }
-
-               return nents;
-       }
-
        for_each_sg(sg, s, nents, i) {
                /* Restore this segment's original unaligned fields first */
                unsigned int s_iova_off = sg_dma_address(s);
index b46dbfa6d0ed691c86a2970dda3c8d04baa35c26..004feaed3c72cf0304b3392f51955143926e7f94 100644
@@ -1461,8 +1461,8 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
                int mask = ilog2(__roundup_pow_of_two(npages));
                unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
 
-               if (WARN_ON_ONCE(!ALIGN(addr, align)))
-                       addr &= ~(align - 1);
+               if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
+                       addr = ALIGN_DOWN(addr, align);
 
                desc.qw0 = QI_EIOTLB_PASID(pasid) |
                                QI_EIOTLB_DID(did) |
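
The old expression never tested alignment at all: ALIGN() rounds up, so !ALIGN(addr, align) can only fire when the rounded result is zero. For a power-of-two align, as here, the helpers in the fix reduce to plain bit operations; illustrative identities with align = 0x4000:

	IS_ALIGNED(0x5000, 0x4000)	/* false: 0x5000 & 0x3fff == 0x1000 */
	ALIGN_DOWN(0x5000, 0x4000)	/* 0x4000: 0x5000 & ~0x3fff */
	ALIGN(0x5000, 0x4000)		/* 0x8000: rounds up, (almost) never 0 */
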
index 788119c5b021bd69ee2e4bdefaee8bc68a062d96..f665322a09919fbaea5dd3eae59c7ac87a7fe04d 100644
@@ -38,7 +38,6 @@
 #include <linux/dmi.h>
 #include <linux/pci-ats.h>
 #include <linux/memblock.h>
-#include <linux/dma-map-ops.h>
 #include <linux/dma-direct.h>
 #include <linux/crash_dump.h>
 #include <linux/numa.h>
@@ -719,6 +718,8 @@ static int domain_update_device_node(struct dmar_domain *domain)
        return nid;
 }
 
+static void domain_update_iotlb(struct dmar_domain *domain);
+
 /* Some capabilities may be different across iommus */
 static void domain_update_iommu_cap(struct dmar_domain *domain)
 {
@@ -744,6 +745,8 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
                domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
        else
                domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
+
+       domain_update_iotlb(domain);
 }
 
 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
@@ -1464,17 +1467,22 @@ static void domain_update_iotlb(struct dmar_domain *domain)
 
        assert_spin_locked(&device_domain_lock);
 
-       list_for_each_entry(info, &domain->devices, link) {
-               struct pci_dev *pdev;
-
-               if (!info->dev || !dev_is_pci(info->dev))
-                       continue;
-
-               pdev = to_pci_dev(info->dev);
-               if (pdev->ats_enabled) {
+       list_for_each_entry(info, &domain->devices, link)
+               if (info->ats_enabled) {
                        has_iotlb_device = true;
                        break;
                }
+
+       if (!has_iotlb_device) {
+               struct subdev_domain_info *sinfo;
+
+               list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+                       info = get_domain_info(sinfo->pdev);
+                       if (info && info->ats_enabled) {
+                               has_iotlb_device = true;
+                               break;
+                       }
+               }
        }
 
        domain->has_iotlb_device = has_iotlb_device;
@@ -1555,25 +1563,37 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
 #endif
 }
 
+static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
+                                   u64 addr, unsigned int mask)
+{
+       u16 sid, qdep;
+
+       if (!info || !info->ats_enabled)
+               return;
+
+       sid = info->bus << 8 | info->devfn;
+       qdep = info->ats_qdep;
+       qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+                          qdep, addr, mask);
+}
+
 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
                                  u64 addr, unsigned mask)
 {
-       u16 sid, qdep;
        unsigned long flags;
        struct device_domain_info *info;
+       struct subdev_domain_info *sinfo;
 
        if (!domain->has_iotlb_device)
                return;
 
        spin_lock_irqsave(&device_domain_lock, flags);
-       list_for_each_entry(info, &domain->devices, link) {
-               if (!info->ats_enabled)
-                       continue;
+       list_for_each_entry(info, &domain->devices, link)
+               __iommu_flush_dev_iotlb(info, addr, mask);
 
-               sid = info->bus << 8 | info->devfn;
-               qdep = info->ats_qdep;
-               qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
-                               qdep, addr, mask);
+       list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+               info = get_domain_info(sinfo->pdev);
+               __iommu_flush_dev_iotlb(info, addr, mask);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
@@ -1877,6 +1897,7 @@ static struct dmar_domain *alloc_domain(int flags)
                domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
        domain->has_iotlb_device = false;
        INIT_LIST_HEAD(&domain->devices);
+       INIT_LIST_HEAD(&domain->subdevices);
 
        return domain;
 }
@@ -2547,7 +2568,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
        info->iommu = iommu;
        info->pasid_table = NULL;
        info->auxd_enabled = 0;
-       INIT_LIST_HEAD(&info->auxiliary_domains);
+       INIT_LIST_HEAD(&info->subdevices);
 
        if (dev && dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -4475,33 +4496,61 @@ is_aux_domain(struct device *dev, struct iommu_domain *domain)
                        domain->type == IOMMU_DOMAIN_UNMANAGED;
 }
 
-static void auxiliary_link_device(struct dmar_domain *domain,
-                                 struct device *dev)
+static inline struct subdev_domain_info *
+lookup_subdev_info(struct dmar_domain *domain, struct device *dev)
+{
+       struct subdev_domain_info *sinfo;
+
+       if (!list_empty(&domain->subdevices)) {
+               list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+                       if (sinfo->pdev == dev)
+                               return sinfo;
+               }
+       }
+
+       return NULL;
+}
+
+static int auxiliary_link_device(struct dmar_domain *domain,
+                                struct device *dev)
 {
        struct device_domain_info *info = get_domain_info(dev);
+       struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
 
        assert_spin_locked(&device_domain_lock);
        if (WARN_ON(!info))
-               return;
+               return -EINVAL;
 
-       domain->auxd_refcnt++;
-       list_add(&domain->auxd, &info->auxiliary_domains);
+       if (!sinfo) {
+               sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
+               if (!sinfo)
+                       return -ENOMEM;
+               sinfo->domain = domain;
+               sinfo->pdev = dev;
+               list_add(&sinfo->link_phys, &info->subdevices);
+               list_add(&sinfo->link_domain, &domain->subdevices);
+       }
+
+       return ++sinfo->users;
 }
 
-static void auxiliary_unlink_device(struct dmar_domain *domain,
-                                   struct device *dev)
+static int auxiliary_unlink_device(struct dmar_domain *domain,
+                                  struct device *dev)
 {
        struct device_domain_info *info = get_domain_info(dev);
+       struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
+       int ret;
 
        assert_spin_locked(&device_domain_lock);
-       if (WARN_ON(!info))
-               return;
+       if (WARN_ON(!info || !sinfo || sinfo->users <= 0))
+               return -EINVAL;
 
-       list_del(&domain->auxd);
-       domain->auxd_refcnt--;
+       ret = --sinfo->users;
+       if (!ret) {
+               list_del(&sinfo->link_phys);
+               list_del(&sinfo->link_domain);
+               kfree(sinfo);
+       }
 
-       if (!domain->auxd_refcnt && domain->default_pasid > 0)
-               ioasid_put(domain->default_pasid);
+       return ret;
 }
 
 static int aux_domain_add_dev(struct dmar_domain *domain,
@@ -4530,6 +4579,19 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
        }
 
        spin_lock_irqsave(&device_domain_lock, flags);
+       ret = auxiliary_link_device(domain, dev);
+       if (ret <= 0)
+               goto link_failed;
+
+       /*
+        * Subdevices from the same physical device can be attached to the
+        * same domain. For such cases, only the first subdevice attachment
+        * needs to go through the full steps in this function. So if ret >
+        * 1, just goto out.
+        */
+       if (ret > 1)
+               goto out;
+
        /*
         * iommu->lock must be held to attach domain to iommu and setup the
         * pasid entry for second level translation.
@@ -4548,10 +4610,9 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
                                                     domain->default_pasid);
        if (ret)
                goto table_failed;
-       spin_unlock(&iommu->lock);
-
-       auxiliary_link_device(domain, dev);
 
+       spin_unlock(&iommu->lock);
+out:
        spin_unlock_irqrestore(&device_domain_lock, flags);
 
        return 0;
@@ -4560,8 +4621,10 @@ table_failed:
        domain_detach_iommu(domain, iommu);
 attach_failed:
        spin_unlock(&iommu->lock);
+       auxiliary_unlink_device(domain, dev);
+link_failed:
        spin_unlock_irqrestore(&device_domain_lock, flags);
-       if (!domain->auxd_refcnt && domain->default_pasid > 0)
+       if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
                ioasid_put(domain->default_pasid);
 
        return ret;
@@ -4581,14 +4644,18 @@ static void aux_domain_remove_dev(struct dmar_domain *domain,
        info = get_domain_info(dev);
        iommu = info->iommu;
 
-       auxiliary_unlink_device(domain, dev);
-
-       spin_lock(&iommu->lock);
-       intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false);
-       domain_detach_iommu(domain, iommu);
-       spin_unlock(&iommu->lock);
+       if (!auxiliary_unlink_device(domain, dev)) {
+               spin_lock(&iommu->lock);
+               intel_pasid_tear_down_entry(iommu, dev,
+                                           domain->default_pasid, false);
+               domain_detach_iommu(domain, iommu);
+               spin_unlock(&iommu->lock);
+       }
 
        spin_unlock_irqrestore(&device_domain_lock, flags);
+
+       if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
+               ioasid_put(domain->default_pasid);
 }
 
 static int prepare_domain_attach_device(struct iommu_domain *domain,
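
The refactoring in this file replaces the global auxd_refcnt with a per-(domain, device) subdev_domain_info that carries its own users count, making attach and detach symmetric: only the 0 -> 1 transition programs the PASID entry and only the 1 -> 0 transition tears it down. A reduced sketch of the link half, with hypothetical demo_* types:

	/* Returns the new user count, or a negative errno. */
	static int demo_link(struct demo_domain *dom, struct device *dev)
	{
		struct demo_subdev *s = demo_lookup(dom, dev);	/* assumed */

		if (!s) {
			s = kzalloc(sizeof(*s), GFP_ATOMIC);
			if (!s)
				return -ENOMEM;
			s->dev = dev;
			list_add(&s->node, &dom->subdevices);
		}
		return ++s->users;	/* > 1 means already fully set up */
	}
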
index aeffda92b10b76058e2ef11a8b6d1319ddfb72d8..685200a5cff0f30578d847e3915aa5179fb66e7d 100644
@@ -1353,6 +1353,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
                irq_data = irq_domain_get_irq_data(domain, virq + i);
                irq_cfg = irqd_cfg(irq_data);
                if (!irq_data || !irq_cfg) {
+                       if (!i)
+                               kfree(data);
                        ret = -EINVAL;
                        goto out_free_data;
                }
index 4fa248b98031cde564692b9bc5642cae2aeda04e..18a9f05df4079b1b3ee2d19f8a748536cbec360e 100644
@@ -118,8 +118,10 @@ void intel_svm_check(struct intel_iommu *iommu)
        iommu->flags |= VTD_FLAG_SVM_CAPABLE;
 }
 
-static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
-                               unsigned long address, unsigned long pages, int ih)
+static void __flush_svm_range_dev(struct intel_svm *svm,
+                                 struct intel_svm_dev *sdev,
+                                 unsigned long address,
+                                 unsigned long pages, int ih)
 {
        struct qi_desc desc;
 
@@ -142,7 +144,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
        }
        desc.qw2 = 0;
        desc.qw3 = 0;
-       qi_submit_sync(svm->iommu, &desc, 1, 0);
+       qi_submit_sync(sdev->iommu, &desc, 1, 0);
 
        if (sdev->dev_iotlb) {
                desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
@@ -166,7 +168,23 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
                }
                desc.qw2 = 0;
                desc.qw3 = 0;
-               qi_submit_sync(svm->iommu, &desc, 1, 0);
+               qi_submit_sync(sdev->iommu, &desc, 1, 0);
+       }
+}
+
+static void intel_flush_svm_range_dev(struct intel_svm *svm,
+                                     struct intel_svm_dev *sdev,
+                                     unsigned long address,
+                                     unsigned long pages, int ih)
+{
+       unsigned long shift = ilog2(__roundup_pow_of_two(pages));
+       unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
+       unsigned long start = ALIGN_DOWN(address, align);
+       unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);
+
+       while (start < end) {
+               __flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
+               start += align;
        }
 }
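
The wrapper exists because the invalidation descriptor can only express a naturally aligned power-of-two range, so an arbitrary (address, pages) request is split into one or more such chunks. Worked example with 4K pages, address = 0x1000 and pages = 3:

	shift = ilog2(roundup_pow_of_two(3))       = 2
	align = 1UL << (VTD_PAGE_SHIFT + shift)    = 0x4000
	start = ALIGN_DOWN(0x1000, 0x4000)         = 0x0000
	end   = ALIGN(0x1000 + 0x3000, 0x4000)     = 0x4000

so the loop issues a single __flush_svm_range_dev() call covering four pages at 0x0000.
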
 
@@ -211,7 +229,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
         */
        rcu_read_lock();
        list_for_each_entry_rcu(sdev, &svm->devs, list)
-               intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
+               intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
                                            svm->pasid, true);
        rcu_read_unlock();
 
@@ -281,6 +299,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
        struct dmar_domain *dmar_domain;
        struct device_domain_info *info;
        struct intel_svm *svm = NULL;
+       unsigned long iflags;
        int ret = 0;
 
        if (WARN_ON(!iommu) || !data)
@@ -363,6 +382,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
        }
        sdev->dev = dev;
        sdev->sid = PCI_DEVID(info->bus, info->devfn);
+       sdev->iommu = iommu;
 
        /* Only count users if device has aux domains */
        if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
@@ -381,12 +401,12 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
         * each bind of a new device even with an existing PASID, we need to
         * call the nested mode setup function here.
         */
-       spin_lock(&iommu->lock);
+       spin_lock_irqsave(&iommu->lock, iflags);
        ret = intel_pasid_setup_nested(iommu, dev,
                                       (pgd_t *)(uintptr_t)data->gpgd,
                                       data->hpasid, &data->vendor.vtd, dmar_domain,
                                       data->addr_width);
-       spin_unlock(&iommu->lock);
+       spin_unlock_irqrestore(&iommu->lock, iflags);
        if (ret) {
                dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
                                    data->hpasid, ret);
@@ -486,6 +506,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
        struct device_domain_info *info;
        struct intel_svm_dev *sdev;
        struct intel_svm *svm = NULL;
+       unsigned long iflags;
        int pasid_max;
        int ret;
 
@@ -546,6 +567,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
                goto out;
        }
        sdev->dev = dev;
+       sdev->iommu = iommu;
 
        ret = intel_iommu_enable_pasid(iommu, dev);
        if (ret) {
@@ -575,7 +597,6 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
                        kfree(sdev);
                        goto out;
                }
-               svm->iommu = iommu;
 
                if (pasid_max > intel_pasid_max_id)
                        pasid_max = intel_pasid_max_id;
@@ -605,14 +626,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
                        }
                }
 
-               spin_lock(&iommu->lock);
+               spin_lock_irqsave(&iommu->lock, iflags);
                ret = intel_pasid_setup_first_level(iommu, dev,
                                mm ? mm->pgd : init_mm.pgd,
                                svm->pasid, FLPT_DEFAULT_DID,
                                (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
                                (cpu_feature_enabled(X86_FEATURE_LA57) ?
                                 PASID_FLAG_FL5LP : 0));
-               spin_unlock(&iommu->lock);
+               spin_unlock_irqrestore(&iommu->lock, iflags);
                if (ret) {
                        if (mm)
                                mmu_notifier_unregister(&svm->notifier, mm);
@@ -632,14 +653,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
                 * Binding a new device with existing PASID, need to setup
                 * the PASID entry.
                 */
-               spin_lock(&iommu->lock);
+               spin_lock_irqsave(&iommu->lock, iflags);
                ret = intel_pasid_setup_first_level(iommu, dev,
                                                mm ? mm->pgd : init_mm.pgd,
                                                svm->pasid, FLPT_DEFAULT_DID,
                                                (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
                                                (cpu_feature_enabled(X86_FEATURE_LA57) ?
                                                PASID_FLAG_FL5LP : 0));
-               spin_unlock(&iommu->lock);
+               spin_unlock_irqrestore(&iommu->lock, iflags);
                if (ret) {
                        kfree(sdev);
                        goto out;
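
The spin_lock_irqsave() conversions above follow the standard rule for a
lock that is also taken from contexts where interrupts are disabled: plain
spin_lock() usage is then inconsistent (lockdep flags it) and can deadlock
if the lock is contended from such a context. A minimal kernel-style
sketch of the pattern, with illustrative names, not the driver's code:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void process_context_path(void)
{
        unsigned long flags;

        /* save the current IRQ state, disable IRQs, then take the lock */
        spin_lock_irqsave(&example_lock, flags);
        /* ... critical section also reachable with IRQs disabled ... */
        spin_unlock_irqrestore(&example_lock, flags);   /* restore state */
}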
index 4bb3293ae4d73553968821e9a8d2e5bc4414a64c..d20b8b333d30d179960088f54af95614ed8569e0 100644 (file)
@@ -358,7 +358,7 @@ static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
  * @iovad: - iova domain in question.
  * @pfn: - page frame number
  * This function finds and returns an iova belonging to the
- * given doamin which matches the given pfn.
+ * given domain which matches the given pfn.
  */
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
 {
@@ -601,7 +601,7 @@ void queue_iova(struct iova_domain *iovad,
 EXPORT_SYMBOL_GPL(queue_iova);
 
 /**
- * put_iova_domain - destroys the iova doamin
+ * put_iova_domain - destroys the iova domain
  * @iovad: - iova domain in question.
  * All the iova's in that domain are destroyed.
  */
@@ -712,9 +712,9 @@ EXPORT_SYMBOL_GPL(reserve_iova);
 
 /**
  * copy_reserved_iova - copies reserved iovas between domains
- * @from: - source doamin from where to copy
+ * @from: - source domain from where to copy
  * @to: - destination domain where to copy
- * This function copies reserved iova's from one doamin to
+ * This function copies reserved iova's from one domain to
  * other.
  */
 void
index 26cf0ac9c4ad0e2c10fa259b2afc4b3cc220e3a9..c9a53c2224728d149c15bf2c7a0c3ebd97c0d6e4 100644 (file)
@@ -13,6 +13,7 @@ if MISDN != n
 config MISDN_DSP
        tristate "Digital Audio Processing of transparent data"
        depends on MISDN
+       select BITREVERSE
        help
          Enable support for digital audio processing capability.
 
index 8f39f9ba5c80e8ecb5e6493aa0fde6c0e4834aa0..4c2ce210c1237d0fb36fd8b519564dcbb5b54070 100644 (file)
@@ -19,6 +19,7 @@ if NVM
 
 config NVM_PBLK
        tristate "Physical Block Device Open-Channel SSD target"
+       select CRC32
        help
          Allows an open-channel SSD to be exposed as a block device to the
          host. The target assumes the device exposes raw flash and must be
index b7e2d96666142eb948924a8112ed02e2c3f779f1..9e44c09f6410890f43faaad328e7edea0f5da986 100644 (file)
@@ -605,6 +605,7 @@ config DM_INTEGRITY
        select BLK_DEV_INTEGRITY
        select DM_BUFIO
        select CRYPTO
+       select CRYPTO_SKCIPHER
        select ASYNC_XOR
        help
          This device-mapper target emulates a block device that has
@@ -622,6 +623,7 @@ config DM_ZONED
        tristate "Drive-managed zoned block device target support"
        depends on BLK_DEV_DM
        depends on BLK_DEV_ZONED
+       select CRC32
        help
          This device-mapper target takes a host-managed or host-aware zoned
          block device and exposes most of its capacity as a regular block
index 6469223f0b777143f30ba30255768885db7b8aff..d636b7b2d070c49608aeb910096edfb32bcb4469 100644 (file)
@@ -17,7 +17,7 @@ struct feature {
 };
 
 static struct feature feature_list[] = {
-       {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LARGE_BUCKET,
+       {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE,
                "large_bucket"},
        {0, 0, 0 },
 };
index a1653c4780416cce384187781e5f617079f82a32..84fc2c0f01015b280ed86ba702c99ef29db95465 100644 (file)
 
 /* Feature set definition */
 /* Incompat feature set */
-#define BCH_FEATURE_INCOMPAT_LARGE_BUCKET      0x0001 /* 32bit bucket size */
+/* 32bit bucket size, obsoleted */
+#define BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET         0x0001
+/* real bucket size is (1 << bucket_size) */
+#define BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE     0x0002
 
-#define BCH_FEATURE_COMPAT_SUUP                0
-#define BCH_FEATURE_RO_COMPAT_SUUP     0
-#define BCH_FEATURE_INCOMPAT_SUUP      BCH_FEATURE_INCOMPAT_LARGE_BUCKET
+#define BCH_FEATURE_COMPAT_SUPP                0
+#define BCH_FEATURE_RO_COMPAT_SUPP     0
+#define BCH_FEATURE_INCOMPAT_SUPP      (BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET | \
+                                        BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE)
 
 #define BCH_HAS_COMPAT_FEATURE(sb, mask) \
                ((sb)->feature_compat & (mask))
@@ -77,7 +81,23 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
                ~BCH##_FEATURE_INCOMPAT_##flagname; \
 }
 
-BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(obso_large_bucket, OBSO_LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LOG_LARGE_BUCKET_SIZE);
+
+static inline bool bch_has_unknown_compat_features(struct cache_sb *sb)
+{
+       return ((sb->feature_compat & ~BCH_FEATURE_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_ro_compat_features(struct cache_sb *sb)
+{
+       return ((sb->feature_ro_compat & ~BCH_FEATURE_RO_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_incompat_features(struct cache_sb *sb)
+{
+       return ((sb->feature_incompat & ~BCH_FEATURE_INCOMPAT_SUPP) != 0);
+}
 
 int bch_print_cache_set_feature_compat(struct cache_set *c, char *buf, int size);
 int bch_print_cache_set_feature_ro_compat(struct cache_set *c, char *buf, int size);
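
The three predicates above implement the usual compat/ro_compat/incompat
superblock gating: any feature bit outside the supported mask means the
on-disk format carries something this kernel does not understand, and
bcache refuses such a device at read_super() time for all three classes.
A user-space sketch of the masking logic; the mask values are invented
for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SUPP_INCOMPAT   (0x0001u | 0x0002u)     /* bits we understand */

static bool has_unknown_incompat(uint64_t feature_incompat)
{
        return (feature_incompat & ~(uint64_t)SUPP_INCOMPAT) != 0;
}

int main(void)
{
        printf("%d\n", has_unknown_incompat(0x0002));   /* 0: supported */
        printf("%d\n", has_unknown_incompat(0x0004));   /* 1: refuse */
        return 0;
}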
index a4752ac410dc4e93ed3095a37e1cd9e4a3958977..2047a9cccdb5dee42a081dfa21f3efbc49410129 100644 (file)
@@ -64,9 +64,25 @@ static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s
 {
        unsigned int bucket_size = le16_to_cpu(s->bucket_size);
 
-       if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES &&
-            bch_has_feature_large_bucket(sb))
-               bucket_size |= le16_to_cpu(s->bucket_size_hi) << 16;
+       if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
+               if (bch_has_feature_large_bucket(sb)) {
+                       unsigned int max, order;
+
+                       max = sizeof(unsigned int) * BITS_PER_BYTE - 1;
+                       order = le16_to_cpu(s->bucket_size);
+                       /*
+                        * The bcache tools ensure the overflow cannot
+                        * happen; an error message here is enough.
+                        */
+                       if (order > max)
+                               pr_err("Bucket size (1 << %u) overflows\n",
+                                       order);
+                       bucket_size = 1 << order;
+               } else if (bch_has_feature_obso_large_bucket(sb)) {
+                       bucket_size +=
+                               le16_to_cpu(s->obso_bucket_size_hi) << 16;
+               }
+       }
 
        return bucket_size;
 }
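
The hunk above decodes two on-disk encodings: the new scheme stores log2
of the bucket size in the 16-bit field, while the obsoleted scheme split
a 32-bit size across bucket_size and a high half-word. A user-space
sketch of both decodings; the parameter names are local stand-ins for
the superblock members:

#include <stdint.h>
#include <stdio.h>

static unsigned int decode_log_encoding(uint16_t bucket_size_field)
{
        /* new scheme: the field holds log2(bucket size) */
        return 1u << bucket_size_field;
}

static unsigned int decode_obsolete_encoding(uint16_t lo, uint16_t hi)
{
        /* obsolete scheme: 32-bit size split across two 16-bit fields */
        return (unsigned int)lo | ((unsigned int)hi << 16);
}

int main(void)
{
        printf("%u\n", decode_log_encoding(10));        /* 1024 */
        printf("%u\n", decode_obsolete_encoding(0, 1)); /* 65536 */
        return 0;
}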
@@ -228,6 +244,20 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
                sb->feature_compat = le64_to_cpu(s->feature_compat);
                sb->feature_incompat = le64_to_cpu(s->feature_incompat);
                sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
+
+               /* Check for unknown (unsupported) feature bits */
+               err = "Unsupported compatible feature found";
+               if (bch_has_unknown_compat_features(sb))
+                       goto err;
+
+               err = "Unsupported read-only compatible feature found";
+               if (bch_has_unknown_ro_compat_features(sb))
+                       goto err;
+
+               err = "Unsupported incompatible feature found";
+               if (bch_has_unknown_incompat_features(sb))
+                       goto err;
+
                err = read_super_common(sb, bdev, s);
                if (err)
                        goto err;
@@ -1302,6 +1332,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
        bcache_device_link(&dc->disk, c, "bdev");
        atomic_inc(&c->attached_dev_nr);
 
+       if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) {
+               pr_err("The obsolete large bucket layout is unsupported, setting the bcache device read-only\n");
+               pr_err("Please update to the latest bcache-tools to create the cache device\n");
+               set_disk_ro(dc->disk.disk, 1);
+       }
+
        /* Allow the writeback thread to proceed */
        up_write(&dc->writeback_lock);
 
@@ -1524,6 +1560,12 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
 
        bcache_device_link(d, c, "volume");
 
+       if (bch_has_feature_obso_large_bucket(&c->cache->sb)) {
+               pr_err("The obsolete large bucket layout is unsupported, setting the bcache device read-only\n");
+               pr_err("Please update to the latest bcache-tools to create the cache device\n");
+               set_disk_ro(d->disk, 1);
+       }
+
        return 0;
 err:
        kobject_put(&d->kobj);
@@ -2083,6 +2125,9 @@ static int run_cache_set(struct cache_set *c)
        c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
        bcache_write_super(c);
 
+       if (bch_has_feature_obso_large_bucket(&c->cache->sb))
+               pr_err("Detected obsolete large bucket layout; all attached bcache devices will be read-only\n");
+
        list_for_each_entry_safe(dc, t, &uncached_devices, list)
                bch_cached_dev_attach(dc, c, NULL);
 
@@ -2644,8 +2689,8 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
        }
 
        list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
+               char *pdev_set_uuid = pdev->dc->sb.set_uuid;
                list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
-                       char *pdev_set_uuid = pdev->dc->sb.set_uuid;
                        char *set_uuid = c->set_uuid;
 
                        if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
index 9c1a86bde658e48b67dcc67058d01de0c15d6e12..fce4cbf9529d6c85e5f7df6e99eb71db2c1df053 100644 (file)
@@ -1534,6 +1534,12 @@ sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
 }
 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
 
+struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
+{
+       return c->dm_io;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
+
 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
 {
        return b->block;
index 5f9f9b3a226d7ccad1bedc57c1fede7f1a9c888e..8c874710f0bcfdca9c741018fca1ebd53f7ebfa2 100644 (file)
@@ -1454,13 +1454,16 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
 static void kcryptd_async_done(struct crypto_async_request *async_req,
                               int error);
 
-static void crypt_alloc_req_skcipher(struct crypt_config *cc,
+static int crypt_alloc_req_skcipher(struct crypt_config *cc,
                                     struct convert_context *ctx)
 {
        unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
 
-       if (!ctx->r.req)
-               ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
+       if (!ctx->r.req) {
+               ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+               if (!ctx->r.req)
+                       return -ENOMEM;
+       }
 
        skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
 
@@ -1471,13 +1474,18 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
        skcipher_request_set_callback(ctx->r.req,
            CRYPTO_TFM_REQ_MAY_BACKLOG,
            kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
+
+       return 0;
 }
 
-static void crypt_alloc_req_aead(struct crypt_config *cc,
+static int crypt_alloc_req_aead(struct crypt_config *cc,
                                 struct convert_context *ctx)
 {
-       if (!ctx->r.req_aead)
-               ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
+       if (!ctx->r.req_aead) {
+               ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+               if (!ctx->r.req_aead)
+                       return -ENOMEM;
+       }
 
        aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
 
@@ -1488,15 +1496,17 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
        aead_request_set_callback(ctx->r.req_aead,
            CRYPTO_TFM_REQ_MAY_BACKLOG,
            kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
+
+       return 0;
 }
 
-static void crypt_alloc_req(struct crypt_config *cc,
+static int crypt_alloc_req(struct crypt_config *cc,
                            struct convert_context *ctx)
 {
        if (crypt_integrity_aead(cc))
-               crypt_alloc_req_aead(cc, ctx);
+               return crypt_alloc_req_aead(cc, ctx);
        else
-               crypt_alloc_req_skcipher(cc, ctx);
+               return crypt_alloc_req_skcipher(cc, ctx);
 }
 
 static void crypt_free_req_skcipher(struct crypt_config *cc,
@@ -1529,17 +1539,28 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
 static blk_status_t crypt_convert(struct crypt_config *cc,
-                        struct convert_context *ctx, bool atomic)
+                        struct convert_context *ctx, bool atomic, bool reset_pending)
 {
        unsigned int tag_offset = 0;
        unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
        int r;
 
-       atomic_set(&ctx->cc_pending, 1);
+       /*
+        * if reset_pending is set we are dealing with the bio for the first time,
+        * else we're continuing to work on the previous bio, so don't mess with
+        * the cc_pending counter
+        */
+       if (reset_pending)
+               atomic_set(&ctx->cc_pending, 1);
 
        while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
 
-               crypt_alloc_req(cc, ctx);
+               r = crypt_alloc_req(cc, ctx);
+               if (r) {
+                       complete(&ctx->restart);
+                       return BLK_STS_DEV_RESOURCE;
+               }
+
                atomic_inc(&ctx->cc_pending);
 
                if (crypt_integrity_aead(cc))
@@ -1553,7 +1574,25 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
                 * but the driver request queue is full, let's wait.
                 */
                case -EBUSY:
-                       wait_for_completion(&ctx->restart);
+                       if (in_interrupt()) {
+                               if (try_wait_for_completion(&ctx->restart)) {
+                                       /*
+                                        * we don't have to block to wait for completion,
+                                        * so proceed
+                                        */
+                               } else {
+                                       /*
+                                        * we can't wait for completion without blocking
+                                        * exit and continue processing in a workqueue
+                                        */
+                                       ctx->r.req = NULL;
+                                       ctx->cc_sector += sector_step;
+                                       tag_offset++;
+                                       return BLK_STS_DEV_RESOURCE;
+                               }
+                       } else {
+                               wait_for_completion(&ctx->restart);
+                       }
                        reinit_completion(&ctx->restart);
                        fallthrough;
                /*
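
The -EBUSY handling above is the heart of the softirq-safe conversion: in
atomic context crypt_convert() may only consume an already-signalled
completion via try_wait_for_completion(); if it would have to block, it
saves its position (sector, tag offset) and returns BLK_STS_DEV_RESOURCE
so a workqueue can continue where it left off. A user-space analogue of
that decision, with a POSIX semaphore standing in for the kernel
completion; illustrative only:

#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

static sem_t restart;

/* atomic_context mimics in_interrupt(): when true we must not block */
static bool handle_busy(bool atomic_context)
{
        if (atomic_context) {
                if (sem_trywait(&restart) == 0)
                        return true;    /* already signalled, proceed */
                return false;           /* would block: defer to a worker */
        }
        sem_wait(&restart);             /* process context: blocking is fine */
        return true;
}

int main(void)
{
        sem_init(&restart, 0, 0);
        printf("atomic, not signalled -> %d (defer)\n", handle_busy(true));
        sem_post(&restart);
        printf("atomic, signalled     -> %d (proceed)\n", handle_busy(true));
        sem_destroy(&restart);
        return 0;
}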
@@ -1691,6 +1730,12 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
        atomic_inc(&io->io_pending);
 }
 
+static void kcryptd_io_bio_endio(struct work_struct *work)
+{
+       struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+       bio_endio(io->base_bio);
+}
+
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
@@ -1713,7 +1758,23 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
                kfree(io->integrity_metadata);
 
        base_bio->bi_status = error;
-       bio_endio(base_bio);
+
+       /*
+        * If we are running this function from our tasklet,
+        * we can't call bio_endio() here, because it will call
+        * clone_endio() from dm.c, which in turn will
+        * free the current struct dm_crypt_io structure with
+        * our tasklet. In this case we need to delay bio_endio()
+        * execution to after the tasklet is done and dequeued.
+        */
+       if (tasklet_trylock(&io->tasklet)) {
+               tasklet_unlock(&io->tasklet);
+               bio_endio(base_bio);
+               return;
+       }
+
+       INIT_WORK(&io->work, kcryptd_io_bio_endio);
+       queue_work(cc->io_queue, &io->work);
 }
 
 /*
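
The tasklet_trylock()/tasklet_unlock() pair above acts as a context probe:
if the trylock succeeds, crypt_dec_pending() is not running from inside
io->tasklet, so bio_endio() may run (and free the structure) immediately;
if it fails, the completion is bounced to a workqueue so the tasklet can
finish first. A kernel-style sketch of the general defer-to-workqueue
completion pattern, with illustrative names, not the dm-crypt code:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_io {
        struct work_struct work;
        /* ... per-I/O state that must outlive the current context ... */
};

static void my_io_complete_work(struct work_struct *work)
{
        struct my_io *io = container_of(work, struct my_io, work);

        kfree(io);      /* now in process context: freeing is safe */
}

static void my_io_done(struct my_io *io, struct workqueue_struct *wq,
                       bool may_free_inline)
{
        if (may_free_inline) {
                kfree(io);      /* complete directly */
                return;
        }
        /* the current context still references io: defer the final free */
        INIT_WORK(&io->work, my_io_complete_work);
        queue_work(wq, &io->work);
}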
@@ -1945,6 +2006,37 @@ static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
        }
 }
 
+static void kcryptd_crypt_write_continue(struct work_struct *work)
+{
+       struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+       struct crypt_config *cc = io->cc;
+       struct convert_context *ctx = &io->ctx;
+       int crypt_finished;
+       sector_t sector = io->sector;
+       blk_status_t r;
+
+       wait_for_completion(&ctx->restart);
+       reinit_completion(&ctx->restart);
+
+       r = crypt_convert(cc, &io->ctx, true, false);
+       if (r)
+               io->error = r;
+       crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
+       if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
+               /* Wait for completion signaled by kcryptd_async_done() */
+               wait_for_completion(&ctx->restart);
+               crypt_finished = 1;
+       }
+
+       /* Encryption was already finished, submit io now */
+       if (crypt_finished) {
+               kcryptd_crypt_write_io_submit(io, 0);
+               io->sector = sector;
+       }
+
+       crypt_dec_pending(io);
+}
+
 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->cc;
@@ -1973,7 +2065,17 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 
        crypt_inc_pending(io);
        r = crypt_convert(cc, ctx,
-                         test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags));
+                         test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
+       /*
+        * Crypto API backlogged the request, because its queue was full
+        * and we're in softirq context, so continue from a workqueue
+        * (TODO: is it actually possible to be in softirq in the write path?)
+        */
+       if (r == BLK_STS_DEV_RESOURCE) {
+               INIT_WORK(&io->work, kcryptd_crypt_write_continue);
+               queue_work(cc->crypt_queue, &io->work);
+               return;
+       }
        if (r)
                io->error = r;
        crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
@@ -1998,6 +2100,25 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
        crypt_dec_pending(io);
 }
 
+static void kcryptd_crypt_read_continue(struct work_struct *work)
+{
+       struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+       struct crypt_config *cc = io->cc;
+       blk_status_t r;
+
+       wait_for_completion(&io->ctx.restart);
+       reinit_completion(&io->ctx.restart);
+
+       r = crypt_convert(cc, &io->ctx, true, false);
+       if (r)
+               io->error = r;
+
+       if (atomic_dec_and_test(&io->ctx.cc_pending))
+               kcryptd_crypt_read_done(io);
+
+       crypt_dec_pending(io);
+}
+
 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->cc;
@@ -2009,7 +2130,16 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
                           io->sector);
 
        r = crypt_convert(cc, &io->ctx,
-                         test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags));
+                         test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+       /*
+        * Crypto API backlogged the request, because its queue was full
+        * and we're in softirq context, so continue from a workqueue
+        */
+       if (r == BLK_STS_DEV_RESOURCE) {
+               INIT_WORK(&io->work, kcryptd_crypt_read_continue);
+               queue_work(cc->crypt_queue, &io->work);
+               return;
+       }
        if (r)
                io->error = r;
 
@@ -2091,8 +2221,12 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 
        if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
            (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
-               if (in_irq()) {
-                       /* Crypto API's "skcipher_walk_first() refuses to work in hard IRQ context */
+               /*
+                * in_irq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
+                * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
+                * it is being executed with irqs disabled.
+                */
+               if (in_irq() || irqs_disabled()) {
                        tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
                        tasklet_schedule(&io->tasklet);
                        return;
@@ -3166,12 +3300,11 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        }
 
        if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
-               cc->crypt_queue = alloc_workqueue("kcryptd-%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
+               cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
                                                  1, devname);
        else
-               cc->crypt_queue = alloc_workqueue("kcryptd-%s",
-                                                 WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM |
-                                                 WQ_UNBOUND | WQ_SYSFS,
+               cc->crypt_queue = alloc_workqueue("kcryptd/%s",
+                                                 WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
                                                  num_online_cpus(), devname);
        if (!cc->crypt_queue) {
                ti->error = "Couldn't create kcryptd queue";
index 5a7a1b90e671cfa09c64dc268547f31101f18cd9..81df019ab284a031218f66fcbbe881be82fef634 100644 (file)
@@ -1379,12 +1379,52 @@ thorough_test:
 #undef MAY_BE_HASH
 }
 
-static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
+struct flush_request {
+       struct dm_io_request io_req;
+       struct dm_io_region io_reg;
+       struct dm_integrity_c *ic;
+       struct completion comp;
+};
+
+static void flush_notify(unsigned long error, void *fr_)
+{
+       struct flush_request *fr = fr_;
+
+       if (unlikely(error != 0))
+               dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
+       complete(&fr->comp);
+}
+
+static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
 {
        int r;
+
+       struct flush_request fr;
+
+       if (!ic->meta_dev)
+               flush_data = false;
+       if (flush_data) {
+               fr.io_req.bi_op = REQ_OP_WRITE;
+               fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC;
+               fr.io_req.mem.type = DM_IO_KMEM;
+               fr.io_req.mem.ptr.addr = NULL;
+               fr.io_req.notify.fn = flush_notify;
+               fr.io_req.notify.context = &fr;
+               fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio);
+               fr.io_reg.bdev = ic->dev->bdev;
+               fr.io_reg.sector = 0;
+               fr.io_reg.count = 0;
+               fr.ic = ic;
+               init_completion(&fr.comp);
+               r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
+               BUG_ON(r);
+       }
+
        r = dm_bufio_write_dirty_buffers(ic->bufio);
        if (unlikely(r))
                dm_integrity_io_error(ic, "writing tags", r);
+
+       if (flush_data)
+               wait_for_completion(&fr.comp);
 }
 
 static void sleep_on_endio_wait(struct dm_integrity_c *ic)
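
dm_integrity_flush_buffers() now overlaps the data-device cache flush with
the metadata writeback: the flush is issued asynchronously through dm_io()
with a notify callback, dm_bufio_write_dirty_buffers() runs in the
meantime, and only then does the function wait on the completion. A
user-space sketch of that overlap, with a thread standing in for the
asynchronous dm_io; illustrative only:

#include <pthread.h>
#include <stdio.h>

static void *issue_flush(void *arg)
{
        (void)arg;
        puts("flushing data device...");        /* the REQ_PREFLUSH dm_io */
        return NULL;
}

int main(void)
{
        pthread_t flush;

        pthread_create(&flush, NULL, issue_flush, NULL);  /* async issue */
        puts("writing dirty metadata buffers...");        /* overlapped */
        pthread_join(&flush, NULL);     /* wait_for_completion(&fr.comp) */
        return 0;
}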
@@ -2110,7 +2150,7 @@ offload_to_thread:
 
        if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
                integrity_metadata(&dio->work);
-               dm_integrity_flush_buffers(ic);
+               dm_integrity_flush_buffers(ic, false);
 
                dio->in_flight = (atomic_t)ATOMIC_INIT(1);
                dio->completion = NULL;
@@ -2195,7 +2235,7 @@ static void integrity_commit(struct work_struct *w)
        flushes = bio_list_get(&ic->flush_bio_list);
        if (unlikely(ic->mode != 'J')) {
                spin_unlock_irq(&ic->endio_wait.lock);
-               dm_integrity_flush_buffers(ic);
+               dm_integrity_flush_buffers(ic, true);
                goto release_flush_bios;
        }
 
@@ -2409,7 +2449,7 @@ skip_io:
        complete_journal_op(&comp);
        wait_for_completion_io(&comp.comp);
 
-       dm_integrity_flush_buffers(ic);
+       dm_integrity_flush_buffers(ic, true);
 }
 
 static void integrity_writer(struct work_struct *w)
@@ -2451,7 +2491,7 @@ static void recalc_write_super(struct dm_integrity_c *ic)
 {
        int r;
 
-       dm_integrity_flush_buffers(ic);
+       dm_integrity_flush_buffers(ic, false);
        if (dm_integrity_failed(ic))
                return;
 
@@ -2654,7 +2694,7 @@ static void bitmap_flush_work(struct work_struct *work)
        unsigned long limit;
        struct bio *bio;
 
-       dm_integrity_flush_buffers(ic);
+       dm_integrity_flush_buffers(ic, false);
 
        range.logical_sector = 0;
        range.n_sectors = ic->provided_data_sectors;
@@ -2663,9 +2703,7 @@ static void bitmap_flush_work(struct work_struct *work)
        add_new_range_and_wait(ic, &range);
        spin_unlock_irq(&ic->endio_wait.lock);
 
-       dm_integrity_flush_buffers(ic);
-       if (ic->meta_dev)
-               blkdev_issue_flush(ic->dev->bdev, GFP_NOIO);
+       dm_integrity_flush_buffers(ic, true);
 
        limit = ic->provided_data_sectors;
        if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
@@ -2934,11 +2972,11 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
                if (ic->meta_dev)
                        queue_work(ic->writer_wq, &ic->writer_work);
                drain_workqueue(ic->writer_wq);
-               dm_integrity_flush_buffers(ic);
+               dm_integrity_flush_buffers(ic, true);
        }
 
        if (ic->mode == 'B') {
-               dm_integrity_flush_buffers(ic);
+               dm_integrity_flush_buffers(ic, true);
 #if 1
                /* set to 0 to test bitmap replay code */
                init_journal(ic, 0, ic->journal_sections, 0);
@@ -3754,7 +3792,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
        unsigned extra_args;
        struct dm_arg_set as;
        static const struct dm_arg _args[] = {
-               {0, 9, "Invalid number of feature args"},
+               {0, 15, "Invalid number of feature args"},
        };
        unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
        bool should_write_sb;
index 23c38777e8f63892880bc608509c147836a5407a..cab12b2251bac21c2717912d7228132e4860b821 100644 (file)
@@ -3729,10 +3729,10 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
        blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
 
        /*
-        * RAID1 and RAID10 personalities require bio splitting,
-        * RAID0/4/5/6 don't and process large discard bios properly.
+        * RAID0 and RAID10 personalities require bio splitting,
+        * RAID1/4/5/6 don't and process large discard bios properly.
         */
-       if (rs_is_raid1(rs) || rs_is_raid10(rs)) {
+       if (rs_is_raid0(rs) || rs_is_raid10(rs)) {
                limits->discard_granularity = chunk_size_bytes;
                limits->max_discard_sectors = rs->md.chunk_sectors;
        }
index 4668b2cd98f4e246f217674839f2f6b82a834fef..11890db71f3fe30a6a01958ae1da28ceb5d76d79 100644 (file)
@@ -141,6 +141,11 @@ struct dm_snapshot {
         * for them to be committed.
         */
        struct bio_list bios_queued_during_merge;
+
+       /*
+        * Flush data after merge.
+        */
+       struct bio flush_bio;
 };
 
 /*
@@ -1121,6 +1126,17 @@ shut:
 
 static void error_bios(struct bio *bio);
 
+static int flush_data(struct dm_snapshot *s)
+{
+       struct bio *flush_bio = &s->flush_bio;
+
+       bio_reset(flush_bio);
+       bio_set_dev(flush_bio, s->origin->bdev);
+       flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+
+       return submit_bio_wait(flush_bio);
+}
+
 static void merge_callback(int read_err, unsigned long write_err, void *context)
 {
        struct dm_snapshot *s = context;
@@ -1134,6 +1150,11 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
                goto shut;
        }
 
+       if (flush_data(s) < 0) {
+               DMERR("Flush after merge failed: shutting down merge");
+               goto shut;
+       }
+
        if (s->store->type->commit_merge(s->store,
                                         s->num_merging_chunks) < 0) {
                DMERR("Write error in exception store: shutting down merge");
@@ -1318,6 +1339,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;
        bio_list_init(&s->bios_queued_during_merge);
+       bio_init(&s->flush_bio, NULL, 0);
 
        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
@@ -1504,6 +1526,8 @@ static void snapshot_dtr(struct dm_target *ti)
 
        dm_exception_store_destroy(s->store);
 
+       bio_uninit(&s->flush_bio);
+
        dm_put_device(ti, s->cow);
 
        dm_put_device(ti, s->origin);
index b3c3c8b4cb428f587dbe9840b276fdc14d8fd630..7bac564f3faa6e1b49899b72e1d5286efcff840b 100644 (file)
@@ -562,7 +562,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
                 * subset of the parent bdev; require extra privileges.
                 */
                if (!capable(CAP_SYS_RAWIO)) {
-                       DMWARN_LIMIT(
+                       DMDEBUG_LIMIT(
        "%s: sending ioctl %x to DM device without required privilege.",
                                current->comm, cmd);
                        r = -ENOIOCTLCMD;
index beb482310a58640a255ad9c8a4c815326ad1411e..b2b3d2b0f808ad1a39b3fbf804c7eb52245ffff4 100644 (file)
@@ -472,8 +472,11 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
        cntr = &hdev->aggregated_cs_counters;
 
        cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
-       if (!cs)
+       if (!cs) {
+               atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+               atomic64_inc(&cntr->out_of_mem_drop_cnt);
                return -ENOMEM;
+       }
 
        cs->ctx = ctx;
        cs->submitted = false;
@@ -486,6 +489,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
 
        cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
        if (!cs_cmpl) {
+               atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+               atomic64_inc(&cntr->out_of_mem_drop_cnt);
                rc = -ENOMEM;
                goto free_cs;
        }
@@ -513,6 +518,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
        cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
                        sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
        if (!cs->jobs_in_queue_cnt) {
+               atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+               atomic64_inc(&cntr->out_of_mem_drop_cnt);
                rc = -ENOMEM;
                goto free_fence;
        }
@@ -562,7 +569,7 @@ void hl_cs_rollback_all(struct hl_device *hdev)
        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
                flush_workqueue(hdev->cq_wq[i]);
 
-       /* Make sure we don't have leftovers in the H/W queues mirror list */
+       /* Make sure we don't have leftovers in the CS mirror list */
        list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
                cs_get(cs);
                cs->aborted = true;
@@ -764,11 +771,14 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
 
 static int hl_cs_copy_chunk_array(struct hl_device *hdev,
                                        struct hl_cs_chunk **cs_chunk_array,
-                                       void __user *chunks, u32 num_chunks)
+                                       void __user *chunks, u32 num_chunks,
+                                       struct hl_ctx *ctx)
 {
        u32 size_to_copy;
 
        if (num_chunks > HL_MAX_JOBS_PER_CS) {
+               atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+               atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
                dev_err(hdev->dev,
                        "Number of chunks can NOT be larger than %d\n",
                        HL_MAX_JOBS_PER_CS);
@@ -777,11 +787,16 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev,
 
        *cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
                                        GFP_ATOMIC);
-       if (!*cs_chunk_array)
+       if (!*cs_chunk_array) {
+               atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+               atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
                return -ENOMEM;
+       }
 
        size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
        if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
+               atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+               atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
                dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
                kfree(*cs_chunk_array);
                return -EFAULT;
@@ -797,6 +812,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
        struct hl_device *hdev = hpriv->hdev;
        struct hl_cs_chunk *cs_chunk_array;
        struct hl_cs_counters_atomic *cntr;
+       struct hl_ctx *ctx = hpriv->ctx;
        struct hl_cs_job *job;
        struct hl_cs *cs;
        struct hl_cb *cb;
@@ -805,7 +821,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
        cntr = &hdev->aggregated_cs_counters;
        *cs_seq = ULLONG_MAX;
 
-       rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+       rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
+                       hpriv->ctx);
        if (rc)
                goto out;
 
@@ -832,8 +849,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
                rc = validate_queue_index(hdev, chunk, &queue_type,
                                                &is_kernel_allocated_cb);
                if (rc) {
-                       atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
-                       atomic64_inc(&cntr->parsing_drop_cnt);
+                       atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+                       atomic64_inc(&cntr->validation_drop_cnt);
                        goto free_cs_object;
                }
 
@@ -841,8 +858,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
                        cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
                        if (!cb) {
                                atomic64_inc(
-                               &hpriv->ctx->cs_counters.parsing_drop_cnt);
-                               atomic64_inc(&cntr->parsing_drop_cnt);
+                                       &ctx->cs_counters.validation_drop_cnt);
+                               atomic64_inc(&cntr->validation_drop_cnt);
                                rc = -EINVAL;
                                goto free_cs_object;
                        }
@@ -856,8 +873,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
                job = hl_cs_allocate_job(hdev, queue_type,
                                                is_kernel_allocated_cb);
                if (!job) {
-                       atomic64_inc(
-                       &hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
+                       atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
                        atomic64_inc(&cntr->out_of_mem_drop_cnt);
                        dev_err(hdev->dev, "Failed to allocate a new job\n");
                        rc = -ENOMEM;
@@ -891,7 +907,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
 
                rc = cs_parser(hpriv, job);
                if (rc) {
-                       atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
+                       atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
                        atomic64_inc(&cntr->parsing_drop_cnt);
                        dev_err(hdev->dev,
                                "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
@@ -901,8 +917,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
        }
 
        if (int_queues_only) {
-               atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
-               atomic64_inc(&cntr->parsing_drop_cnt);
+               atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+               atomic64_inc(&cntr->validation_drop_cnt);
                dev_err(hdev->dev,
                        "Reject CS %d.%llu because only internal queues jobs are present\n",
                        cs->ctx->asid, cs->sequence);
@@ -1042,7 +1058,7 @@ out:
 }
 
 static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
-               struct hl_cs_chunk *chunk, u64 *signal_seq)
+               struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx)
 {
        u64 *signal_seq_arr = NULL;
        u32 size_to_copy, signal_seq_arr_len;
@@ -1052,6 +1068,8 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
 
        /* currently only one signal seq is supported */
        if (signal_seq_arr_len != 1) {
+               atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+               atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
                dev_err(hdev->dev,
                        "Wait for signal CS supports only one signal CS seq\n");
                return -EINVAL;
@@ -1060,13 +1078,18 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
        signal_seq_arr = kmalloc_array(signal_seq_arr_len,
                                        sizeof(*signal_seq_arr),
                                        GFP_ATOMIC);
-       if (!signal_seq_arr)
+       if (!signal_seq_arr) {
+               atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+               atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
                return -ENOMEM;
+       }
 
        size_to_copy = chunk->num_signal_seq_arr * sizeof(*signal_seq_arr);
        if (copy_from_user(signal_seq_arr,
                                u64_to_user_ptr(chunk->signal_seq_arr),
                                size_to_copy)) {
+               atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+               atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
                dev_err(hdev->dev,
                        "Failed to copy signal seq array from user\n");
                rc = -EFAULT;
@@ -1153,6 +1176,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
        struct hl_device *hdev = hpriv->hdev;
        struct hl_cs_compl *sig_waitcs_cmpl;
        u32 q_idx, collective_engine_id = 0;
+       struct hl_cs_counters_atomic *cntr;
        struct hl_fence *sig_fence = NULL;
        struct hl_ctx *ctx = hpriv->ctx;
        enum hl_queue_type q_type;
@@ -1160,9 +1184,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
        u64 signal_seq;
        int rc;
 
+       cntr = &hdev->aggregated_cs_counters;
        *cs_seq = ULLONG_MAX;
 
-       rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+       rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
+                       ctx);
        if (rc)
                goto out;
 
@@ -1170,6 +1196,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
        chunk = &cs_chunk_array[0];
 
        if (chunk->queue_index >= hdev->asic_prop.max_queues) {
+               atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+               atomic64_inc(&cntr->validation_drop_cnt);
                dev_err(hdev->dev, "Queue index %d is invalid\n",
                        chunk->queue_index);
                rc = -EINVAL;
@@ -1181,6 +1209,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
        q_type = hw_queue_prop->type;
 
        if (!hw_queue_prop->supports_sync_stream) {
+               atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+               atomic64_inc(&cntr->validation_drop_cnt);
                dev_err(hdev->dev,
                        "Queue index %d does not support sync stream operations\n",
                        q_idx);
@@ -1190,6 +1220,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
 
        if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
                if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
+                       atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+                       atomic64_inc(&cntr->validation_drop_cnt);
                        dev_err(hdev->dev,
                                "Queue index %d is invalid\n", q_idx);
                        rc = -EINVAL;
@@ -1200,12 +1232,14 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
        }
 
        if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) {
-               rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq);
+               rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq, ctx);
                if (rc)
                        goto free_cs_chunk_array;
 
                sig_fence = hl_ctx_get_fence(ctx, signal_seq);
                if (IS_ERR(sig_fence)) {
+                       atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+                       atomic64_inc(&cntr->validation_drop_cnt);
                        dev_err(hdev->dev,
                                "Failed to get signal CS with seq 0x%llx\n",
                                signal_seq);
@@ -1223,6 +1257,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
                        container_of(sig_fence, struct hl_cs_compl, base_fence);
 
                if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) {
+                       atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+                       atomic64_inc(&cntr->validation_drop_cnt);
                        dev_err(hdev->dev,
                                "CS seq 0x%llx is not of a signal CS\n",
                                signal_seq);
@@ -1270,8 +1306,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
        else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
                rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
                                cs, q_idx, collective_engine_id);
-       else
+       else {
+               atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+               atomic64_inc(&cntr->validation_drop_cnt);
                rc = -EINVAL;
+       }
 
        if (rc)
                goto free_cs_object;
index 5871162a84425ef895852296a1b7b44b02ef166b..1456eabf96010f94584e17db22dfda4263195801 100644 (file)
@@ -17,12 +17,12 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
 {
        enum hl_device_status status;
 
-       if (hdev->disabled)
-               status = HL_DEVICE_STATUS_MALFUNCTION;
-       else if (atomic_read(&hdev->in_reset))
+       if (atomic_read(&hdev->in_reset))
                status = HL_DEVICE_STATUS_IN_RESET;
        else if (hdev->needs_reset)
                status = HL_DEVICE_STATUS_NEEDS_RESET;
+       else if (hdev->disabled)
+               status = HL_DEVICE_STATUS_MALFUNCTION;
        else
                status = HL_DEVICE_STATUS_OPERATIONAL;
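
The reordering above makes the reset states take precedence over
"disabled": a device undergoing reset is also flagged disabled, so
checking hdev->disabled first misreported an in-flight reset as a
malfunction. A user-space sketch of the resulting priority chain; the
enum names are local stand-ins, not the driver's definitions:

#include <stdbool.h>
#include <stdio.h>

enum status { OPERATIONAL, MALFUNCTION, IN_RESET, NEEDS_RESET };

static enum status device_status(bool in_reset, bool needs_reset,
                                 bool disabled)
{
        if (in_reset)
                return IN_RESET;
        if (needs_reset)
                return NEEDS_RESET;
        if (disabled)
                return MALFUNCTION;
        return OPERATIONAL;
}

int main(void)
{
        /* disabled AND in reset: report the reset, not a malfunction */
        printf("%d\n", device_status(true, false, true));       /* IN_RESET */
        return 0;
}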
 
@@ -1092,6 +1092,7 @@ kill_processes:
                                                GFP_KERNEL);
                if (!hdev->kernel_ctx) {
                        rc = -ENOMEM;
+                       hl_mmu_fini(hdev);
                        goto out_err;
                }
 
@@ -1103,6 +1104,7 @@ kill_processes:
                                "failed to init kernel ctx in hard reset\n");
                        kfree(hdev->kernel_ctx);
                        hdev->kernel_ctx = NULL;
+                       hl_mmu_fini(hdev);
                        goto out_err;
                }
        }
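
The two added hl_mmu_fini() calls restore the invariant that an error path
undoes, in reverse order, everything that succeeded before the failure. A
user-space sketch of that unwinding style; the init/fini pairs are
illustrative stand-ins:

#include <stdio.h>

static int init_a(void) { puts("init a"); return 0; }
static void fini_a(void) { puts("fini a"); }
static int init_b(void) { puts("init b"); return -1; /* simulated failure */ }

static int setup(void)
{
        int rc;

        rc = init_a();
        if (rc)
                return rc;

        rc = init_b();
        if (rc)
                goto err_fini_a;        /* undo what already succeeded */

        return 0;

err_fini_a:
        fini_a();
        return rc;
}

int main(void)
{
        return setup() ? 1 : 0;
}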
index 0e1c629e9800ab4ad834765f2b4643fc84639f63..20f77f58edef5573577592110289f3937202208f 100644 (file)
@@ -627,25 +627,38 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
        security_status = RREG32(cpu_security_boot_status_reg);
 
        /* We read security status multiple times during boot:
-        * 1. preboot - we check if fw security feature is supported
-        * 2. boot cpu - we get boot cpu security status
-        * 3. FW application - we get FW application security status
+        * 1. preboot - a. Check whether the security status bits are valid
+        *              b. Check whether fw security is enabled
+        *              c. Check whether hard reset is done by preboot
+        * 2. boot cpu - a. Fetch boot cpu security status
+        *               b. Check whether hard reset is done by boot cpu
+        * 3. FW application - a. Fetch fw application security status
+        *                     b. Check whether hard reset is done by fw app
         *
         * Preboot:
         * Check security status bit (CPU_BOOT_DEV_STS0_ENABLED), if it is set
         * check security enabled bit (CPU_BOOT_DEV_STS0_SECURITY_EN)
         */
        if (security_status & CPU_BOOT_DEV_STS0_ENABLED) {
-               hdev->asic_prop.fw_security_status_valid = 1;
-               prop->fw_security_disabled =
-                       !(security_status & CPU_BOOT_DEV_STS0_SECURITY_EN);
+               prop->fw_security_status_valid = 1;
+
+               if (security_status & CPU_BOOT_DEV_STS0_SECURITY_EN)
+                       prop->fw_security_disabled = false;
+               else
+                       prop->fw_security_disabled = true;
+
+               if (security_status & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+                       prop->hard_reset_done_by_fw = true;
        } else {
-               hdev->asic_prop.fw_security_status_valid = 0;
+               prop->fw_security_status_valid = 0;
                prop->fw_security_disabled = true;
        }
 
+       dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n",
+                       prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+
        dev_info(hdev->dev, "firmware-level security is %s\n",
-               prop->fw_security_disabled ? "disabled" : "enabled");
+                       prop->fw_security_disabled ? "disabled" : "enabled");
 
        return 0;
 }
@@ -655,6 +668,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
                        u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
                        bool skip_bmc, u32 cpu_timeout, u32 boot_fit_timeout)
 {
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
        u32 status;
        int rc;
 
@@ -723,11 +737,22 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
        /* Read U-Boot version now in case we will later fail */
        hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT);
 
+       /* Clear reset status since we need to read it again from boot CPU */
+       prop->hard_reset_done_by_fw = false;
+
        /* Read boot_cpu security bits */
-       if (hdev->asic_prop.fw_security_status_valid)
-               hdev->asic_prop.fw_boot_cpu_security_map =
+       if (prop->fw_security_status_valid) {
+               prop->fw_boot_cpu_security_map =
                                RREG32(cpu_security_boot_status_reg);
 
+               if (prop->fw_boot_cpu_security_map &
+                               CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+                       prop->hard_reset_done_by_fw = true;
+       }
+
+       dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n",
+                       prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+
        if (rc) {
                detect_cpu_boot_status(hdev, status);
                rc = -EIO;
@@ -796,18 +821,21 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
                goto out;
        }
 
+       /* Clear reset status since we need to read again from app */
+       prop->hard_reset_done_by_fw = false;
+
        /* Read FW application security bits */
-       if (hdev->asic_prop.fw_security_status_valid) {
-               hdev->asic_prop.fw_app_security_map =
+       if (prop->fw_security_status_valid) {
+               prop->fw_app_security_map =
                                RREG32(cpu_security_boot_status_reg);
 
-               if (hdev->asic_prop.fw_app_security_map &
+               if (prop->fw_app_security_map &
                                CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
-                       hdev->asic_prop.hard_reset_done_by_fw = true;
+                       prop->hard_reset_done_by_fw = true;
        }
 
-       dev_dbg(hdev->dev, "Firmware hard-reset is %s\n",
-               hdev->asic_prop.hard_reset_done_by_fw ? "enabled" : "disabled");
+       dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n",
+                       prop->hard_reset_done_by_fw ? "enabled" : "disabled");
 
        dev_info(hdev->dev, "Successfully loaded firmware to device\n");
 
index 571eda6ef5ab09b77e2ce2a7a43c609efd82397d..e0d7f5fbaa5c3f2950068d4618b2d5fb5b540a9c 100644 (file)
@@ -944,7 +944,7 @@ struct hl_asic_funcs {
        u32 (*get_signal_cb_size)(struct hl_device *hdev);
        u32 (*get_wait_cb_size)(struct hl_device *hdev);
        u32 (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id,
-                       u32 size);
+                       u32 size, bool eb);
        u32 (*gen_wait_cb)(struct hl_device *hdev,
                        struct hl_gen_wait_properties *prop);
        void (*reset_sob)(struct hl_device *hdev, void *data);
@@ -1000,6 +1000,7 @@ struct hl_va_range {
  * @queue_full_drop_cnt: dropped due to queue full
  * @device_in_reset_drop_cnt: dropped due to device in reset
  * @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight
+ * @validation_drop_cnt: dropped due to error in validation
  */
 struct hl_cs_counters_atomic {
        atomic64_t out_of_mem_drop_cnt;
@@ -1007,6 +1008,7 @@ struct hl_cs_counters_atomic {
        atomic64_t queue_full_drop_cnt;
        atomic64_t device_in_reset_drop_cnt;
        atomic64_t max_cs_in_flight_drop_cnt;
+       atomic64_t validation_drop_cnt;
 };
 
 /**
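
Throughout this patch every rejected submission increments two counters,
one in the per-context set and one in the device-wide aggregate, so user
space can both total the drops and attribute them to a context. A
user-space sketch with C11 atomics standing in for the kernel's
atomic64_t:

#include <stdatomic.h>
#include <stdio.h>

struct cs_counters {
        atomic_long validation_drop_cnt;
        atomic_long out_of_mem_drop_cnt;
};

static struct cs_counters aggregated;   /* device-wide totals */

static void count_validation_drop(struct cs_counters *ctx)
{
        /* bump both views so drops can be attributed per context */
        atomic_fetch_add(&ctx->validation_drop_cnt, 1);
        atomic_fetch_add(&aggregated.validation_drop_cnt, 1);
}

int main(void)
{
        struct cs_counters ctx = {0};

        count_validation_drop(&ctx);
        printf("ctx=%ld total=%ld\n",
               atomic_load(&ctx.validation_drop_cnt),
               atomic_load(&aggregated.validation_drop_cnt));
        return 0;
}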
index 6bbb6bca68600732fcae1ff0239a91afc6291a0f..032d114f01ea54b705f2f0df0cb93309d07e2947 100644 (file)
@@ -544,6 +544,7 @@ static struct pci_driver hl_pci_driver = {
        .id_table = ids,
        .probe = hl_pci_probe,
        .remove = hl_pci_remove,
+       .shutdown = hl_pci_remove,
        .driver.pm = &hl_pm_ops,
        .err_handler = &hl_pci_err_handler,
 };
index 32e6af1db4e35ed0f31360872e6b03c62ffc02dc..12efbd9d2e3a0b7459dcb8298e69248bef1e1750 100644 (file)
@@ -335,6 +335,8 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
                        atomic64_read(&cntr->device_in_reset_drop_cnt);
        cs_counters.total_max_cs_in_flight_drop_cnt =
                        atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
+       cs_counters.total_validation_drop_cnt =
+                       atomic64_read(&cntr->validation_drop_cnt);
 
        if (hpriv->ctx) {
                cs_counters.ctx_out_of_mem_drop_cnt =
@@ -352,6 +354,9 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
                cs_counters.ctx_max_cs_in_flight_drop_cnt =
                                atomic64_read(
                        &hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
+               cs_counters.ctx_validation_drop_cnt =
+                               atomic64_read(
+                               &hpriv->ctx->cs_counters.validation_drop_cnt);
        }
 
        return copy_to_user(out, &cs_counters,
@@ -406,7 +411,7 @@ static int total_energy_consumption_info(struct hl_fpriv *hpriv,
 static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
 {
        struct hl_device *hdev = hpriv->hdev;
-       struct hl_pll_frequency_info freq_info = {0};
+       struct hl_pll_frequency_info freq_info = { {0} };
        u32 max_size = args->return_size;
        void __user *out = (void __user *) (uintptr_t) args->return_pointer;
        int rc;
index 7caf868d1585c2fbd1d5cb033cedde1ddbfb035e..76217258780a4b2649154e041a08e111e121f389 100644 (file)
@@ -418,8 +418,11 @@ static void init_signal_cs(struct hl_device *hdev,
                "generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
                cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx);
 
+       /* we set an EB since we must make sure all operations are done
+        * when sending the signal
+        */
        hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
-                               cs_cmpl->hw_sob->sob_id, 0);
+                               cs_cmpl->hw_sob->sob_id, 0, true);
 
        kref_get(&hw_sob->kref);
 
index 923b2606e29fe607214aa1b0e91183b22f595531..b4725e6101f6c1a8bb95628dcb0213613c164d28 100644 (file)
@@ -130,10 +130,8 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
        if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
                return 0;
 
-       if (val & PCI_CONFIG_ELBI_STS_ERR) {
-               dev_err(hdev->dev, "Error writing to ELBI\n");
+       if (val & PCI_CONFIG_ELBI_STS_ERR)
                return -EIO;
-       }
 
        if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
                dev_err(hdev->dev, "ELBI write didn't finish in time\n");
@@ -160,8 +158,12 @@ int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
 
        dbi_offset = addr & 0xFFF;
 
-       rc = hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
-       rc |= hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
+       /* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
+        * when firmware security is enabled
+        */
+       hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
+
+       rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
                                data);
 
        if (rc)
@@ -244,9 +246,11 @@ int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
 
        rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);
 
-       /* Return the DBI window to the default location */
-       rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
-       rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+       /* Return the DBI window to the default location
+        * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
+        * when firmware security is enabled
+        */
+       hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
 
        if (rc)
                dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
@@ -294,9 +298,11 @@ int hl_pci_set_outbound_region(struct hl_device *hdev,
        /* Enable */
        rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);
 
-       /* Return the DBI window to the default location */
-       rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
-       rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+       /* Return the DBI window to the default location
+        * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
+        * when firmware security is enabled
+        */
+       hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
 
        return rc;
 }
index 1f1926607c5e7acf9eb2902fa6aecb0e0ebd38c7..8c09e4466af8ce72eff7787b1986389f68bdb595 100644 (file)
@@ -151,19 +151,6 @@ static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = {
        [PACKET_LOAD_AND_EXE]   = sizeof(struct packet_load_and_exe)
 };
 
-static const u32 gaudi_pll_base_addresses[GAUDI_PLL_MAX] = {
-       [CPU_PLL] = mmPSOC_CPU_PLL_NR,
-       [PCI_PLL] = mmPSOC_PCI_PLL_NR,
-       [SRAM_PLL] = mmSRAM_W_PLL_NR,
-       [HBM_PLL] = mmPSOC_HBM_PLL_NR,
-       [NIC_PLL] = mmNIC0_PLL_NR,
-       [DMA_PLL] = mmDMA_W_PLL_NR,
-       [MESH_PLL] = mmMESH_W_PLL_NR,
-       [MME_PLL] = mmPSOC_MME_PLL_NR,
-       [TPC_PLL] = mmPSOC_TPC_PLL_NR,
-       [IF_PLL] = mmIF_W_PLL_NR
-};
-
 static inline bool validate_packet_id(enum packet_id id)
 {
        switch (id) {
@@ -374,7 +361,7 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev);
 static void gaudi_disable_clock_gating(struct hl_device *hdev);
 static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid);
 static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
-                               u32 size);
+                               u32 size, bool eb);
 static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
                                struct hl_gen_wait_properties *prop);
 
@@ -667,12 +654,6 @@ static int gaudi_early_init(struct hl_device *hdev)
        if (rc)
                goto free_queue_props;
 
-       if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
-               dev_info(hdev->dev,
-                       "H/W state is dirty, must reset before initializing\n");
-               hdev->asic_funcs->hw_fini(hdev, true);
-       }
-
        /* Before continuing in the initialization, we need to read the preboot
         * version to determine whether we run with a security-enabled firmware
         */
@@ -685,6 +666,12 @@ static int gaudi_early_init(struct hl_device *hdev)
                goto pci_fini;
        }
 
+       if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+               dev_info(hdev->dev,
+                       "H/W state is dirty, must reset before initializing\n");
+               hdev->asic_funcs->hw_fini(hdev, true);
+       }
+
        return 0;
 
 pci_fini:
@@ -703,93 +690,60 @@ static int gaudi_early_fini(struct hl_device *hdev)
 }
 
 /**
- * gaudi_fetch_pll_frequency - Fetch PLL frequency values
+ * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
  *
  * @hdev: pointer to hl_device structure
- * @pll_index: index of the pll to fetch frequency from
- * @pll_freq: pointer to store the pll frequency in MHz in each of the available
- *            outputs. if a certain output is not available a 0 will be set
  *
  */
-static int gaudi_fetch_pll_frequency(struct hl_device *hdev,
-                               enum gaudi_pll_index pll_index,
-                               u16 *pll_freq_arr)
+static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
 {
-       u32 nr = 0, nf = 0, od = 0, pll_clk = 0, div_fctr, div_sel,
-                       pll_base_addr = gaudi_pll_base_addresses[pll_index];
-       u16 freq = 0;
-       int i, rc;
-
-       if (hdev->asic_prop.fw_security_status_valid &&
-                       (hdev->asic_prop.fw_app_security_map &
-                                       CPU_BOOT_DEV_STS0_PLL_INFO_EN)) {
-               rc = hl_fw_cpucp_pll_info_get(hdev, pll_index, pll_freq_arr);
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
+       u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
+       int rc;
 
-               if (rc)
-                       return rc;
-       } else if (hdev->asic_prop.fw_security_disabled) {
+       if (hdev->asic_prop.fw_security_disabled) {
                /* Backward compatibility */
-               nr = RREG32(pll_base_addr + PLL_NR_OFFSET);
-               nf = RREG32(pll_base_addr + PLL_NF_OFFSET);
-               od = RREG32(pll_base_addr + PLL_OD_OFFSET);
-
-               for (i = 0; i < HL_PLL_NUM_OUTPUTS; i++) {
-                       div_fctr = RREG32(pll_base_addr +
-                                       PLL_DIV_FACTOR_0_OFFSET + i * 4);
-                       div_sel = RREG32(pll_base_addr +
-                                       PLL_DIV_SEL_0_OFFSET + i * 4);
+               div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2);
+               div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2);
+               nr = RREG32(mmPSOC_CPU_PLL_NR);
+               nf = RREG32(mmPSOC_CPU_PLL_NF);
+               od = RREG32(mmPSOC_CPU_PLL_OD);
 
-                       if (div_sel == DIV_SEL_REF_CLK ||
+               if (div_sel == DIV_SEL_REF_CLK ||
                                div_sel == DIV_SEL_DIVIDED_REF) {
-                               if (div_sel == DIV_SEL_REF_CLK)
-                                       freq = PLL_REF_CLK;
-                               else
-                                       freq = PLL_REF_CLK / (div_fctr + 1);
-                       } else if (div_sel == DIV_SEL_PLL_CLK ||
-                                       div_sel == DIV_SEL_DIVIDED_PLL) {
-                               pll_clk = PLL_REF_CLK * (nf + 1) /
-                                               ((nr + 1) * (od + 1));
-                               if (div_sel == DIV_SEL_PLL_CLK)
-                                       freq = pll_clk;
-                               else
-                                       freq = pll_clk / (div_fctr + 1);
-                       } else {
-                               dev_warn(hdev->dev,
-                                       "Received invalid div select value: %d",
-                                       div_sel);
-                       }
-
-                       pll_freq_arr[i] = freq;
+                       if (div_sel == DIV_SEL_REF_CLK)
+                               freq = PLL_REF_CLK;
+                       else
+                               freq = PLL_REF_CLK / (div_fctr + 1);
+               } else if (div_sel == DIV_SEL_PLL_CLK ||
+                       div_sel == DIV_SEL_DIVIDED_PLL) {
+                       pll_clk = PLL_REF_CLK * (nf + 1) /
+                                       ((nr + 1) * (od + 1));
+                       if (div_sel == DIV_SEL_PLL_CLK)
+                               freq = pll_clk;
+                       else
+                               freq = pll_clk / (div_fctr + 1);
+               } else {
+                       dev_warn(hdev->dev,
+                               "Received invalid div select value: %d",
+                               div_sel);
+                       freq = 0;
                }
        } else {
-               dev_err(hdev->dev, "Failed to fetch PLL frequency values\n");
-               return -EIO;
-       }
+               rc = hl_fw_cpucp_pll_info_get(hdev, CPU_PLL, pll_freq_arr);
 
-       return 0;
-}
-
-/**
- * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
- *
- * @hdev: pointer to hl_device structure
- *
- */
-static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
-{
-       struct asic_fixed_properties *prop = &hdev->asic_prop;
-       u16 pll_freq[HL_PLL_NUM_OUTPUTS];
-       int rc;
+               if (rc)
+                       return rc;
 
-       rc = gaudi_fetch_pll_frequency(hdev, CPU_PLL, pll_freq);
-       if (rc)
-               return rc;
+               freq = pll_freq_arr[2];
+       }
 
-       prop->psoc_timestamp_frequency = pll_freq[2];
-       prop->psoc_pci_pll_nr = 0;
-       prop->psoc_pci_pll_nf = 0;
-       prop->psoc_pci_pll_od = 0;
-       prop->psoc_pci_pll_div_factor = 0;
+       prop->psoc_timestamp_frequency = freq;
+       prop->psoc_pci_pll_nr = nr;
+       prop->psoc_pci_pll_nf = nf;
+       prop->psoc_pci_pll_od = od;
+       prop->psoc_pci_pll_div_factor = div_fctr;
 
        return 0;
 }
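
The two branches above compute the same quantity; the divider arithmetic reduces to one formula. A minimal stand-alone sketch, assuming the PLL_REF_CLK, nr/nf/od and div_fctr values read in the hunk (the helper name is hypothetical, not driver API):

	/* pll_clk = ref * (nf + 1) / ((nr + 1) * (od + 1)), optionally
	 * divided again by (div_fctr + 1) for the "divided" selects.
	 */
	static u32 pll_output_freq(u32 ref, u32 nr, u32 nf, u32 od,
				   u32 div_fctr, bool divided)
	{
		u32 pll_clk = ref * (nf + 1) / ((nr + 1) * (od + 1));

		return divided ? pll_clk / (div_fctr + 1) : pll_clk;
	}

For example, with ref = 50 (MHz), nr = 0, nf = 31, od = 1 and no extra division this yields 800 MHz; the values are illustrative only.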
@@ -884,11 +838,17 @@ static int gaudi_init_tpc_mem(struct hl_device *hdev)
        size_t fw_size;
        void *cpu_addr;
        dma_addr_t dma_handle;
-       int rc;
+       int rc, count = 5;
 
+again:
        rc = request_firmware(&fw, GAUDI_TPC_FW_FILE, hdev->dev);
+       if (rc == -EINTR && count-- > 0) {
+               msleep(50);
+               goto again;
+       }
+
        if (rc) {
-               dev_err(hdev->dev, "Firmware file %s is not found!\n",
+               dev_err(hdev->dev, "Failed to load firmware file %s\n",
                                GAUDI_TPC_FW_FILE);
                goto out;
        }
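
The hunk above bounds the retry with a counter and a short sleep, retrying only when request_firmware() is interrupted. The same pattern as a self-contained helper, a sketch only (the wrapper name is hypothetical):

	/* Retry request_firmware() up to five extra times on -EINTR,
	 * sleeping 50ms between attempts, mirroring the loop above.
	 */
	static int request_firmware_retry(const struct firmware **fw,
					  const char *name, struct device *dev)
	{
		int rc, count = 5;

		do {
			rc = request_firmware(fw, name, dev);
			if (rc != -EINTR)
				break;
			msleep(50);
		} while (count-- > 0);

		return rc;
	}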
@@ -1110,7 +1070,7 @@ static void gaudi_collective_slave_init_job(struct hl_device *hdev,
                prop->collective_sob_id, queue_id);
 
        cb_size += gaudi_gen_signal_cb(hdev, job->user_cb,
-                       prop->collective_sob_id, cb_size);
+                       prop->collective_sob_id, cb_size, false);
 }
 
 static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
@@ -2449,8 +2409,6 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
        gaudi_init_e2e(hdev);
        gaudi_init_hbm_cred(hdev);
 
-       hdev->asic_funcs->disable_clock_gating(hdev);
-
        for (tpc_id = 0, tpc_offset = 0;
                                tpc_id < TPC_NUMBER_OF_ENGINES;
                                tpc_id++, tpc_offset += TPC_CFG_OFFSET) {
@@ -3462,6 +3420,9 @@ static void gaudi_set_clock_gating(struct hl_device *hdev)
        if (hdev->in_debug)
                return;
 
+       if (!hdev->asic_prop.fw_security_disabled)
+               return;
+
        for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) {
                enable = !!(hdev->clock_gating_mask &
                                (BIT_ULL(gaudi_dma_assignment[i])));
@@ -3513,7 +3474,7 @@ static void gaudi_disable_clock_gating(struct hl_device *hdev)
        u32 qman_offset;
        int i;
 
-       if (!(gaudi->hw_cap_initialized & HW_CAP_CLK_GATE))
+       if (!hdev->asic_prop.fw_security_disabled)
                return;
 
        for (i = 0, qman_offset = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
@@ -3806,7 +3767,7 @@ static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
 static void gaudi_pre_hw_init(struct hl_device *hdev)
 {
        /* Perform read from the device to make sure device is up */
-       RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+       RREG32(mmHW_STATE);
 
        if (hdev->asic_prop.fw_security_disabled) {
                /* Set the access through PCI bars (Linux driver only) as
@@ -3847,6 +3808,13 @@ static int gaudi_hw_init(struct hl_device *hdev)
                return rc;
        }
 
+       /* If clock gating was enabled in preboot, we need to disable
+        * it here before touching the MME/TPC registers.
+        * There is no need to take the clk gating mutex because when this
+        * function runs, no other relevant code can run
+        */
+       hdev->asic_funcs->disable_clock_gating(hdev);
+
        /* SRAM scrambler must be initialized after CPU is running from HBM */
        gaudi_init_scrambler_sram(hdev);
 
@@ -3885,7 +3853,7 @@ static int gaudi_hw_init(struct hl_device *hdev)
        }
 
        /* Perform read from the device to flush all configuration */
-       RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+       RREG32(mmHW_STATE);
 
        return 0;
 
@@ -3927,7 +3895,10 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
        /* I don't know what is the state of the CPU so make sure it is
         * stopped in any means necessary
         */
-       WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
+       if (hdev->asic_prop.hard_reset_done_by_fw)
+               WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_RST_DEV);
+       else
+               WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
 
        WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE);
 
@@ -3971,11 +3942,15 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
 
                WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
                        1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
-       }
 
-       dev_info(hdev->dev,
-               "Issued HARD reset command, going to wait %dms\n",
-               reset_timeout_ms);
+               dev_info(hdev->dev,
+                       "Issued HARD reset command, going to wait %dms\n",
+                       reset_timeout_ms);
+       } else {
+               dev_info(hdev->dev,
+                       "Firmware performs HARD reset, going to wait %dms\n",
+                       reset_timeout_ms);
+       }
 
        /*
         * After hard reset, we can't poll the BTM_FSM register because the PSOC
@@ -7936,7 +7911,7 @@ static u32 gaudi_get_wait_cb_size(struct hl_device *hdev)
 }
 
 static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
-                               u32 size)
+                               u32 size, bool eb)
 {
        struct hl_cb *cb = (struct hl_cb *) data;
        struct packet_msg_short *pkt;
@@ -7953,7 +7928,7 @@ static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
        ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
        ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 3); /* W_S SOB base */
        ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
-       ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 1);
+       ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, eb);
        ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
        ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
 
index f2d91f4fcffea3f19e9511435a78ce700e4a8cad..a7ab2d7e57d449bea2ee98bb663ab8c90ffc02c5 100644 (file)
 #define MME_ACC_OFFSET         (mmMME1_ACC_BASE - mmMME0_ACC_BASE)
 #define SRAM_BANK_OFFSET       (mmSRAM_Y0_X1_RTR_BASE - mmSRAM_Y0_X0_RTR_BASE)
 
-#define PLL_NR_OFFSET          0
-#define PLL_NF_OFFSET          (mmPSOC_CPU_PLL_NF - mmPSOC_CPU_PLL_NR)
-#define PLL_OD_OFFSET          (mmPSOC_CPU_PLL_OD - mmPSOC_CPU_PLL_NR)
-#define PLL_DIV_FACTOR_0_OFFSET        (mmPSOC_CPU_PLL_DIV_FACTOR_0 - \
-                               mmPSOC_CPU_PLL_NR)
-#define PLL_DIV_SEL_0_OFFSET   (mmPSOC_CPU_PLL_DIV_SEL_0 - mmPSOC_CPU_PLL_NR)
-
 #define NUM_OF_SOB_IN_BLOCK            \
        (((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 - \
        mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0) + 4) >> 2)
index 2e3612e1ee28d7620a85dac854def1628e90fb7e..88a09d42e111ccae600f8dc14ffb5462d5d47f55 100644 (file)
@@ -9,6 +9,7 @@
 #include "../include/gaudi/gaudi_coresight.h"
 #include "../include/gaudi/asic_reg/gaudi_regs.h"
 #include "../include/gaudi/gaudi_masks.h"
+#include "../include/gaudi/gaudi_reg_map.h"
 
 #include <uapi/misc/habanalabs.h>
 #define SPMU_SECTION_SIZE              MME0_ACC_SPMU_MAX_OFFSET
@@ -874,7 +875,7 @@ int gaudi_debug_coresight(struct hl_device *hdev, void *data)
        }
 
        /* Perform read from the device to flush all configuration */
-       RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+       RREG32(mmHW_STATE);
 
        return rc;
 }
index 3e5eb9e3d7bd84d4434a744903c231eb6ee538ce..b8b4aa636b7cb652a4e7e5790ef26cfb6792fee6 100644 (file)
@@ -613,12 +613,6 @@ static int goya_early_init(struct hl_device *hdev)
        if (rc)
                goto free_queue_props;
 
-       if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
-               dev_info(hdev->dev,
-                       "H/W state is dirty, must reset before initializing\n");
-               hdev->asic_funcs->hw_fini(hdev, true);
-       }
-
        /* Before continuing in the initialization, we need to read the preboot
         * version to determine whether we run with a security-enabled firmware
         */
@@ -631,6 +625,12 @@ static int goya_early_init(struct hl_device *hdev)
                goto pci_fini;
        }
 
+       if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+               dev_info(hdev->dev,
+                       "H/W state is dirty, must reset before initializing\n");
+               hdev->asic_funcs->hw_fini(hdev, true);
+       }
+
        if (!hdev->pldm) {
                val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
                if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
@@ -694,32 +694,47 @@ static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
 static void goya_fetch_psoc_frequency(struct hl_device *hdev)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
-       u32 trace_freq = 0;
-       u32 pll_clk = 0;
-       u32 div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
-       u32 div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
-       u32 nr = RREG32(mmPSOC_PCI_PLL_NR);
-       u32 nf = RREG32(mmPSOC_PCI_PLL_NF);
-       u32 od = RREG32(mmPSOC_PCI_PLL_OD);
-
-       if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) {
-               if (div_sel == DIV_SEL_REF_CLK)
-                       trace_freq = PLL_REF_CLK;
-               else
-                       trace_freq = PLL_REF_CLK / (div_fctr + 1);
-       } else if (div_sel == DIV_SEL_PLL_CLK ||
-                                       div_sel == DIV_SEL_DIVIDED_PLL) {
-               pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
-               if (div_sel == DIV_SEL_PLL_CLK)
-                       trace_freq = pll_clk;
-               else
-                       trace_freq = pll_clk / (div_fctr + 1);
+       u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
+       u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
+       int rc;
+
+       if (hdev->asic_prop.fw_security_disabled) {
+               div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+               div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
+               nr = RREG32(mmPSOC_PCI_PLL_NR);
+               nf = RREG32(mmPSOC_PCI_PLL_NF);
+               od = RREG32(mmPSOC_PCI_PLL_OD);
+
+               if (div_sel == DIV_SEL_REF_CLK ||
+                               div_sel == DIV_SEL_DIVIDED_REF) {
+                       if (div_sel == DIV_SEL_REF_CLK)
+                               freq = PLL_REF_CLK;
+                       else
+                               freq = PLL_REF_CLK / (div_fctr + 1);
+               } else if (div_sel == DIV_SEL_PLL_CLK ||
+                               div_sel == DIV_SEL_DIVIDED_PLL) {
+                       pll_clk = PLL_REF_CLK * (nf + 1) /
+                                       ((nr + 1) * (od + 1));
+                       if (div_sel == DIV_SEL_PLL_CLK)
+                               freq = pll_clk;
+                       else
+                               freq = pll_clk / (div_fctr + 1);
+               } else {
+                       dev_warn(hdev->dev,
+                               "Received invalid div select value: %d",
+                               div_sel);
+                       freq = 0;
+               }
        } else {
-               dev_warn(hdev->dev,
-                       "Received invalid div select value: %d", div_sel);
+               rc = hl_fw_cpucp_pll_info_get(hdev, PCI_PLL, pll_freq_arr);
+
+               if (rc)
+                       return;
+
+               freq = pll_freq_arr[1];
        }
 
-       prop->psoc_timestamp_frequency = trace_freq;
+       prop->psoc_timestamp_frequency = freq;
        prop->psoc_pci_pll_nr = nr;
        prop->psoc_pci_pll_nf = nf;
        prop->psoc_pci_pll_od = od;
@@ -5324,7 +5339,7 @@ static u32 goya_get_wait_cb_size(struct hl_device *hdev)
 }
 
 static u32 goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
-               u32 size)
+                               u32 size, bool eb)
 {
        return 0;
 }
index e5801ecf0cb23b33d8fd923dcb77f7ccc8a248e4..b637dfd69f6e0deeade855774c1e21e171ef0c49 100644 (file)
  *                                     implemented. This means that FW will
  *                                     perform hard reset procedure on
  *                                     receiving the halt-machine event.
- *                                     Initialized in: linux
+ *                                     Initialized in: preboot, u-boot, linux
  *
  * CPU_BOOT_DEV_STS0_PLL_INFO_EN       FW retrieval of PLL info is enabled.
  *                                     Initialized in: linux
  *
+ * CPU_BOOT_DEV_STS0_CLK_GATE_EN       Clock Gating enabled.
+ *                                     FW initialized Clock Gating.
+ *                                     Initialized in: preboot
+ *
  * CPU_BOOT_DEV_STS0_ENABLED           Device status register enabled.
  *                                     This is a main indication that the
  *                                     running FW populates the device status
 #define CPU_BOOT_DEV_STS0_DRAM_SCR_EN                  (1 << 9)
 #define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN               (1 << 10)
 #define CPU_BOOT_DEV_STS0_PLL_INFO_EN                  (1 << 11)
+#define CPU_BOOT_DEV_STS0_CLK_GATE_EN                  (1 << 13)
 #define CPU_BOOT_DEV_STS0_ENABLED                      (1 << 31)
 
 enum cpu_boot_status {
@@ -204,6 +209,8 @@ enum kmd_msg {
        KMD_MSG_GOTO_WFE,
        KMD_MSG_FIT_RDY,
        KMD_MSG_SKIP_BMC,
+       RESERVED,
+       KMD_MSG_RST_DEV,
 };
 
 enum cpu_msg_status {
index 951b37da5e3ca6c8388fee08c254da61ac3d1aab..41cab297d66e744f69062b28b3a28a203f89925b 100644 (file)
@@ -55,12 +55,23 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
        struct resource *res;
 
        res = platform_get_mem_or_io(pdev, 0);
-       if (res && resource_type(res) == IORESOURCE_IO)
+       if (!res)
+               return -EINVAL;
+
+       switch (resource_type(res)) {
+       case IORESOURCE_IO:
                base = devm_ioport_map(dev, res->start, resource_size(res));
-       else
+               if (!base)
+                       return -ENOMEM;
+               break;
+       case IORESOURCE_MEM:
                base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(base))
-               return PTR_ERR(base);
+               if (IS_ERR(base))
+                       return PTR_ERR(base);
+               break;
+       default:
+               return -EINVAL;
+       }
 
        atomic_notifier_chain_register(&panic_notifier_list,
                                       &pvpanic_panic_nb);
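
The per-case error checks above matter because the two mapping helpers report failure differently: devm_ioport_map() returns NULL, while devm_ioremap_resource() returns an ERR_PTR() value, so the old shared IS_ERR() test let a failed ioport mapping through as a NULL base. A condensed sketch of the two conventions (illustrative, not driver code):

	base = devm_ioport_map(dev, res->start, resource_size(res));
	if (!base)			/* failure is NULL, never ERR_PTR */
		return -ENOMEM;

	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))		/* failure is ERR_PTR, never NULL */
		return PTR_ERR(base);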
index 85ebd2b7e44615e8ac7f78d2a33aba6df0cb3593..85de5f96c02bbca4d5d95dc1e285ae739f13bbd8 100644 (file)
@@ -380,7 +380,7 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                goto free_dst;
 
        min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
-               BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);
+               BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);
 
        err = skb_cow_head(skb, min_headroom);
        if (unlikely(err))
@@ -534,6 +534,7 @@ static void bareudp_setup(struct net_device *dev)
        SET_NETDEV_DEVTYPE(dev, &bareudp_type);
        dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM;
        dev->features    |= NETIF_F_RXCSUM;
+       dev->features    |= NETIF_F_LLTX;
        dev->features    |= NETIF_F_GSO_SOFTWARE;
        dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
        dev->hw_features |= NETIF_F_GSO_SOFTWARE;
@@ -644,11 +645,20 @@ static int bareudp_link_config(struct net_device *dev,
        return 0;
 }
 
+static void bareudp_dellink(struct net_device *dev, struct list_head *head)
+{
+       struct bareudp_dev *bareudp = netdev_priv(dev);
+
+       list_del(&bareudp->next);
+       unregister_netdevice_queue(dev, head);
+}
+
 static int bareudp_newlink(struct net *net, struct net_device *dev,
                           struct nlattr *tb[], struct nlattr *data[],
                           struct netlink_ext_ack *extack)
 {
        struct bareudp_conf conf;
+       LIST_HEAD(list_kill);
        int err;
 
        err = bareudp2info(data, &conf, extack);
@@ -661,17 +671,14 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
 
        err = bareudp_link_config(dev, tb);
        if (err)
-               return err;
+               goto err_unconfig;
 
        return 0;
-}
-
-static void bareudp_dellink(struct net_device *dev, struct list_head *head)
-{
-       struct bareudp_dev *bareudp = netdev_priv(dev);
 
-       list_del(&bareudp->next);
-       unregister_netdevice_queue(dev, head);
+err_unconfig:
+       bareudp_dellink(dev, &list_kill);
+       unregister_netdevice_many(&list_kill);
+       return err;
 }
 
 static size_t bareudp_get_size(const struct net_device *dev)
index 424970939fd4c8760d6ee5a0424b67a8ab771042..1c28eade6becc3d7378c6d2d17c3658a93fe22c5 100644 (file)
@@ -123,6 +123,7 @@ config CAN_JANZ_ICAN3
 config CAN_KVASER_PCIEFD
        depends on PCI
        tristate "Kvaser PCIe FD cards"
+       select CRC32
          help
          This is a driver for the Kvaser PCI Express CAN FD family.
 
index 3486704c8a95736fe3b917ddbe85da6c9639dc9c..8b1ae023cb218f2db5bd3f58a28f6e91ccfa5104 100644 (file)
@@ -592,11 +592,11 @@ static void can_restart(struct net_device *dev)
 
        cf->can_id |= CAN_ERR_RESTARTED;
 
-       netif_rx_ni(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += cf->len;
 
+       netif_rx_ni(skb);
+
 restart:
        netdev_dbg(dev, "restarted\n");
        priv->can_stats.restarts++;
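
The reorder above fixes a use-after-free: netif_rx_ni() hands the skb to the network stack, which may free it, so cf->len must be read while the skb is still owned by this path. The peak_usb and vxcan hunks further below apply the same fix. The pattern, as a sketch:

	stats->rx_packets++;
	stats->rx_bytes += cf->len;	/* cf points into skb->data */
	netif_rx_ni(skb);		/* skb (and cf) may be freed from here */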
index 2c9f1240127600b8aa62085cd698621e16a8c8b7..da551fd0f5026426170589f030bb686abdd879a8 100644 (file)
@@ -1852,8 +1852,6 @@ EXPORT_SYMBOL_GPL(m_can_class_register);
 void m_can_class_unregister(struct m_can_classdev *cdev)
 {
        unregister_candev(cdev->net);
-
-       m_can_clk_stop(cdev);
 }
 EXPORT_SYMBOL_GPL(m_can_class_unregister);
 
index 24c737c4fc446368c1daaa659684040641c1b35b..970f0e9d19bfd90451ac4831993d8aa3e2ddeedb 100644 (file)
@@ -131,30 +131,6 @@ static inline struct tcan4x5x_priv *cdev_to_priv(struct m_can_classdev *cdev)
 
 }
 
-static struct can_bittiming_const tcan4x5x_bittiming_const = {
-       .name = DEVICE_NAME,
-       .tseg1_min = 2,
-       .tseg1_max = 31,
-       .tseg2_min = 2,
-       .tseg2_max = 16,
-       .sjw_max = 16,
-       .brp_min = 1,
-       .brp_max = 32,
-       .brp_inc = 1,
-};
-
-static struct can_bittiming_const tcan4x5x_data_bittiming_const = {
-       .name = DEVICE_NAME,
-       .tseg1_min = 1,
-       .tseg1_max = 32,
-       .tseg2_min = 1,
-       .tseg2_max = 16,
-       .sjw_max = 16,
-       .brp_min = 1,
-       .brp_max = 32,
-       .brp_inc = 1,
-};
-
 static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
 {
        int wake_state = 0;
@@ -469,8 +445,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
        mcan_class->dev = &spi->dev;
        mcan_class->ops = &tcan4x5x_ops;
        mcan_class->is_peripheral = true;
-       mcan_class->bit_timing = &tcan4x5x_bittiming_const;
-       mcan_class->data_timing = &tcan4x5x_data_bittiming_const;
        mcan_class->net->irq = spi->irq;
 
        spi_set_drvdata(spi, priv);
index 8d36101b78e3e5e943304c9ddcb455e68d082d93..29cabc20109e585174879a5af058d60e6c1df8f5 100644 (file)
@@ -1,10 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 config CAN_RCAR
-       tristate "Renesas R-Car CAN controller"
+       tristate "Renesas R-Car and RZ/G CAN controller"
        depends on ARCH_RENESAS || ARM
        help
          Say Y here if you want to use CAN controller found on Renesas R-Car
-         SoCs.
+         or RZ/G SoCs.
 
          To compile this driver as a module, choose M here: the module will
          be called rcar_can.
index 77129d5f410b60c1097d2b60109c5b0539c7fe2b..f07e8b737d31ee04031082ca37c0f047f34d3cf3 100644 (file)
@@ -1368,13 +1368,10 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
                struct mcp251xfd_tx_ring *tx_ring = priv->tx;
                struct spi_transfer *last_xfer;
 
-               tx_ring->tail += len;
-
                /* Increment the TEF FIFO tail pointer 'len' times in
                 * a single SPI message.
-                */
-
-               /* Note:
+                *
+                * Note:
                 *
                 * "cs_change == 1" on the last transfer results in an
                 * active chip select after the complete SPI
@@ -1391,6 +1388,8 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
                if (err)
                        return err;
 
+               tx_ring->tail += len;
+
                err = mcp251xfd_check_tef_tail(priv);
                if (err)
                        return err;
@@ -1492,7 +1491,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
        else
                skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
 
-       if (!cfd) {
+       if (!skb) {
                stats->rx_dropped++;
                return 0;
        }
@@ -1553,10 +1552,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
 
                /* Increment the RX FIFO tail pointer 'len' times in a
                 * single SPI message.
-                */
-               ring->tail += len;
-
-               /* Note:
+                *
+                * Note:
                 *
                 * "cs_change == 1" on the last transfer results in an
                 * active chip select after the complete SPI
@@ -1572,6 +1569,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
                last_xfer->cs_change = 1;
                if (err)
                        return err;
+
+               ring->tail += len;
        }
 
        return 0;
index 61631f4fd92a1d2dea864873431d60beca8b4318..f347ecc79aef2d5395ecc44167fbdb999eb4edef 100644 (file)
@@ -514,11 +514,11 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
        else
                memcpy(cfd->data, rm->d, cfd->len);
 
-       peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
-
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += cfd->len;
 
+       peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
+
        return 0;
 }
 
@@ -580,11 +580,11 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
        if (!skb)
                return -ENOMEM;
 
-       peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
-
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += cf->len;
 
+       peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
+
        return 0;
 }
 
index fa47bab510bb90cb35bcc56fe9d0275dccc57cf4..f9a524c5f6d62710722b920b615154670dfc8992 100644 (file)
@@ -39,6 +39,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
        struct net_device *peer;
        struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
        struct net_device_stats *peerstats, *srcstats = &dev->stats;
+       u8 len;
 
        if (can_dropped_invalid_skb(dev, skb))
                return NETDEV_TX_OK;
@@ -61,12 +62,13 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
        skb->dev        = peer;
        skb->ip_summed  = CHECKSUM_UNNECESSARY;
 
+       len = cfd->len;
        if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
                srcstats->tx_packets++;
-               srcstats->tx_bytes += cfd->len;
+               srcstats->tx_bytes += len;
                peerstats = &peer->stats;
                peerstats->rx_packets++;
-               peerstats->rx_bytes += cfd->len;
+               peerstats->rx_bytes += len;
        }
 
 out_unlock:
index 288b5a5c3e0dbcae40788d27f9843cd06bf3624e..95c7fa171e35acd7a04886ca9e03408b77f3ce16 100644 (file)
@@ -1404,7 +1404,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
            !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
                return -EINVAL;
 
-       if (vlan->vid_end > dev->num_vlans)
+       if (vlan->vid_end >= dev->num_vlans)
                return -ERANGE;
 
        b53_enable_vlan(dev, true, ds->vlan_filtering);
index 222dd35e2c9d8fa8334c992211ada879cf434ac2..e01191107a4ba0fa783ed4d9f41cf1904ab9c9ef 100644 (file)
@@ -4,6 +4,7 @@ config NET_DSA_HIRSCHMANN_HELLCREEK
        depends on HAS_IOMEM
        depends on NET_DSA
        depends on PTP_1588_CLOCK
+       depends on LEDS_CLASS
        select NET_DSA_TAG_HELLCREEK
        help
          This driver adds support for Hirschmann Hellcreek TSN switches.
index 09701c17f3f63ea46420d24ad3e5c807a14be767..662e68a0e7e61d3629b70a96cfd603e26be2d2d8 100644 (file)
@@ -92,9 +92,7 @@
                                         GSWIP_MDIO_PHY_FDUP_MASK)
 
 /* GSWIP MII Registers */
-#define GSWIP_MII_CFG0                 0x00
-#define GSWIP_MII_CFG1                 0x02
-#define GSWIP_MII_CFG5                 0x04
+#define GSWIP_MII_CFGp(p)              (0x2 * (p))
 #define  GSWIP_MII_CFG_EN              BIT(14)
 #define  GSWIP_MII_CFG_LDCLKDIS                BIT(12)
 #define  GSWIP_MII_CFG_MODE_MIIP       0x0
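
Judging from the values above, each port's xMII configuration register sits at a 0x2-byte stride, which the new macro encodes directly; note that the old GSWIP_MII_CFG5 = 0x04 actually addressed port 2's register, while the macro yields 0x0a for port 5. Offsets produced (illustrative):

	GSWIP_MII_CFGp(0) == 0x00
	GSWIP_MII_CFGp(1) == 0x02
	GSWIP_MII_CFGp(5) == 0x0a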
@@ -392,17 +390,9 @@ static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
 static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
                               int port)
 {
-       switch (port) {
-       case 0:
-               gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0);
-               break;
-       case 1:
-               gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1);
-               break;
-       case 5:
-               gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5);
-               break;
-       }
+       /* There's no MII_CFG register for the CPU port */
+       if (!dsa_is_cpu_port(priv->ds, port))
+               gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
 }
 
 static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
@@ -822,9 +812,8 @@ static int gswip_setup(struct dsa_switch *ds)
        gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
 
        /* Disable the xMII link */
-       gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0);
-       gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1);
-       gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5);
+       for (i = 0; i < priv->hw_info->max_ports; i++)
+               gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
 
        /* enable special tag insertion on cpu port */
        gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
@@ -1447,11 +1436,12 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port,
        phylink_set(mask, Pause);
        phylink_set(mask, Asym_Pause);
 
-       /* With the exclusion of MII and Reverse MII, we support Gigabit,
-        * including Half duplex
+       /* With the exclusion of MII, Reverse MII and Reduced MII, we
+        * support Gigabit, including Half duplex
         */
        if (state->interface != PHY_INTERFACE_MODE_MII &&
-           state->interface != PHY_INTERFACE_MODE_REVMII) {
+           state->interface != PHY_INTERFACE_MODE_REVMII &&
+           state->interface != PHY_INTERFACE_MODE_RMII) {
                phylink_set(mask, 1000baseT_Full);
                phylink_set(mask, 1000baseT_Half);
        }
@@ -1541,9 +1531,7 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
 {
        struct gswip_priv *priv = ds->priv;
 
-       /* Enable the xMII interface only for the external PHY */
-       if (interface != PHY_INTERFACE_MODE_INTERNAL)
-               gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
+       gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
 }
 
 static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
index 66ddf67b87371d0a9c12667b700a6996710023f9..7b96396be609e7bb1331d63c6fd11a403922e0b7 100644 (file)
@@ -351,6 +351,10 @@ int mv88e6250_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
                if (err)
                        return err;
 
+               err = mv88e6185_g1_stu_data_read(chip, entry);
+               if (err)
+                       return err;
+
                /* VTU DBNum[3:0] are located in VTU Operation 3:0
                 * VTU DBNum[5:4] are located in VTU Operation 9:8
                 */
index efb33c078a3c65b731e7aa47e8d653fddc169767..cec2018c84a9bc35327054f451f75bf4ee076482 100644 (file)
@@ -19,7 +19,6 @@ if NET_VENDOR_AQUANTIA
 config AQTION
        tristate "aQuantia AQtion(tm) Support"
        depends on PCI
-       depends on X86_64 || ARM64 || COMPILE_TEST
        depends on MACSEC || MACSEC=n
        help
          This enables the support for the aQuantia AQtion(tm) Ethernet card.
index 0fdd19d99d99fdf6c8c45e86c1dd089c28524ea6..0404aafd5ce56ddc1c1ecac402d24dd2f9c51455 100644 (file)
@@ -2503,8 +2503,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
        priv = netdev_priv(dev);
 
        priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
-       if (IS_ERR(priv->clk))
-               return PTR_ERR(priv->clk);
+       if (IS_ERR(priv->clk)) {
+               ret = PTR_ERR(priv->clk);
+               goto err_free_netdev;
+       }
 
        /* Allocate number of TX rings */
        priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
@@ -2577,6 +2579,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
                         NETIF_F_HW_VLAN_CTAG_TX;
        dev->hw_features |= dev->features;
        dev->vlan_features |= dev->features;
+       dev->max_mtu = UMAC_MAX_MTU_SIZE;
 
        /* Request the WOL interrupt and advertise suspend if available */
        priv->wol_irq_disabled = 1;
index 4edd6f8e017e468f6b9555447cf13c40bddec2a1..d10e4f85dd11a8e2214a74169675d62095f9c180 100644 (file)
@@ -6790,8 +6790,10 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
                ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
                if (!ctx->tqm_fp_rings_count)
                        ctx->tqm_fp_rings_count = bp->max_q;
+               else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
+                       ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
 
-               tqm_rings = ctx->tqm_fp_rings_count + 1;
+               tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
                ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
                if (!ctx_pg) {
                        kfree(ctx);
@@ -6925,7 +6927,8 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
             pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
             pg_dir = &req.tqm_sp_page_dir,
             ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
-            i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+            i < BNXT_MAX_TQM_RINGS;
+            i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
                if (!(enables & ena))
                        continue;
 
@@ -12887,10 +12890,10 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
  */
 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
 {
+       pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct bnxt *bp = netdev_priv(netdev);
        int err = 0, off;
-       pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
 
        netdev_info(bp->dev, "PCI Slot Reset\n");
 
@@ -12919,22 +12922,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
                pci_save_state(pdev);
 
                err = bnxt_hwrm_func_reset(bp);
-               if (!err) {
-                       err = bnxt_hwrm_func_qcaps(bp);
-                       if (!err && netif_running(netdev))
-                               err = bnxt_open(netdev);
-               }
-               bnxt_ulp_start(bp, err);
-               if (!err) {
-                       bnxt_reenable_sriov(bp);
+               if (!err)
                        result = PCI_ERS_RESULT_RECOVERED;
-               }
-       }
-
-       if (result != PCI_ERS_RESULT_RECOVERED) {
-               if (netif_running(netdev))
-                       dev_close(netdev);
-               pci_disable_device(pdev);
        }
 
        rtnl_unlock();
@@ -12952,10 +12941,21 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
 static void bnxt_io_resume(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
+       struct bnxt *bp = netdev_priv(netdev);
+       int err;
 
+       netdev_info(bp->dev, "PCI Slot Resume\n");
        rtnl_lock();
 
-       netif_device_attach(netdev);
+       err = bnxt_hwrm_func_qcaps(bp);
+       if (!err && netif_running(netdev))
+               err = bnxt_open(netdev);
+
+       bnxt_ulp_start(bp, err);
+       if (!err) {
+               bnxt_reenable_sriov(bp);
+               netif_device_attach(netdev);
+       }
 
        rtnl_unlock();
 }
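
Taken together, the two hunks above split recovery along the PCI AER callback sequence: slot_reset now only re-enables the device and issues the firmware reset, while the heavier re-initialization (capability query, reopen, ULP and SR-IOV restart) moves to the resume callback. A sketch of where each step now lives, assuming the driver's usual pci_error_handlers wiring:

	static const struct pci_error_handlers bnxt_err_handler = {
		.error_detected	= bnxt_io_error_detected, /* quiesce, choose path */
		.slot_reset	= bnxt_io_slot_reset,	  /* enable PCI, FW reset */
		.resume		= bnxt_io_resume,	  /* qcaps, open, ULP/SR-IOV */
	};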
index 950ea26ae0d26effcb8c497162e8f9fbd7faacab..51996c85547eeaa87f251236048110c19e316ccd 100644 (file)
@@ -1436,6 +1436,11 @@ struct bnxt_ctx_pg_info {
        struct bnxt_ctx_pg_info **ctx_pg_tbl;
 };
 
+#define BNXT_MAX_TQM_SP_RINGS          1
+#define BNXT_MAX_TQM_FP_RINGS          8
+#define BNXT_MAX_TQM_RINGS             \
+       (BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
+
 struct bnxt_ctx_mem_info {
        u32     qp_max_entries;
        u16     qp_min_qp1_entries;
@@ -1474,7 +1479,7 @@ struct bnxt_ctx_mem_info {
        struct bnxt_ctx_pg_info stat_mem;
        struct bnxt_ctx_pg_info mrav_mem;
        struct bnxt_ctx_pg_info tim_mem;
-       struct bnxt_ctx_pg_info *tqm_mem[9];
+       struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
 };
 
 struct bnxt_fw_health {
index 9ff79d5d14c4c7559e4a54526c54e33987794b50..2f8b193a772dac6e6839fad460a24a174821e7a4 100644 (file)
@@ -2532,7 +2532,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
 
                if (rc && ((struct hwrm_err_output *)&resp)->cmd_err ==
                    NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
-                       install.flags |=
+                       install.flags =
                                cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
 
                        rc = _hwrm_send_message_silent(bp, &install,
@@ -2546,6 +2546,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
                                 * UPDATE directory and try the flash again
                                 */
                                defrag_attempted = true;
+                               install.flags = 0;
                                rc = __bnxt_flash_nvram(bp->dev,
                                                        BNX_DIR_TYPE_UPDATE,
                                                        BNX_DIR_ORDINAL_FIRST,
index 8c8368c2f335cb3671dda19e7e58af64d5061ad5..64dbbb04b04346719b46e5e4214ce095d7c9947d 100644 (file)
@@ -222,8 +222,12 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
 
 int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
 {
-       if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
-               return BNXT_MIN_ROCE_STAT_CTXS;
+       if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+               struct bnxt_en_dev *edev = bp->edev;
+
+               if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
+                       return BNXT_MIN_ROCE_STAT_CTXS;
+       }
 
        return 0;
 }
index d5d910916c2e88554922009eb3ec70fe3e912c15..814a5b10141d19038fd3ffc17cc5387b18c7aa19 100644 (file)
@@ -467,7 +467,7 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
 {
        long ferr, rate, rate_rounded;
 
-       if (!bp->tx_clk || !(bp->caps & MACB_CAPS_CLK_HW_CHG))
+       if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
                return;
 
        switch (speed) {
index 92473dda55d9f8238f6c5c3a6ebf168349b3f8b0..22a0220123adeba947911924d553fdf9c446402f 100644 (file)
 #define TCB_L2T_IX_M           0xfffULL
 #define TCB_L2T_IX_V(x)                ((x) << TCB_L2T_IX_S)
 
+#define TCB_T_FLAGS_W           1
+#define TCB_T_FLAGS_S           0
+#define TCB_T_FLAGS_M           0xffffffffffffffffULL
+#define TCB_T_FLAGS_V(x)        ((__u64)(x) << TCB_T_FLAGS_S)
+
+#define TCB_FIELD_COOKIE_TFLAG 1
+
 #define TCB_SMAC_SEL_W         0
 #define TCB_SMAC_SEL_S         24
 #define TCB_SMAC_SEL_M         0xffULL
index 72bb123d53db73e58875431784c2bf3fa351d594..9e237801364210ed337a135f2825b77e054037ab 100644 (file)
@@ -575,7 +575,11 @@ int send_tx_flowc_wr(struct sock *sk, int compl,
 void chtls_tcp_push(struct sock *sk, int flags);
 int chtls_push_frames(struct chtls_sock *csk, int comp);
 int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
+void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
+                                u64 mask, u64 val, u8 cookie,
+                                int through_l2t);
 int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode, int cipher_type);
+void chtls_set_quiesce_ctrl(struct sock *sk, int val);
 void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
 unsigned int keyid_to_addr(int start_addr, int keyid);
 void free_tls_keyid(struct sock *sk);
index a0e0d8a83681beb907f3d1a74b29036b19c08a9c..e5cfbe196ba664decb2a32216a47a1803373d947 100644 (file)
@@ -32,6 +32,7 @@
 #include "chtls.h"
 #include "chtls_cm.h"
 #include "clip_tbl.h"
+#include "t4_tcb.h"
 
 /*
  * State transitions and actions for close.  Note that if we are in SYN_SENT
@@ -267,7 +268,9 @@ static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
        if (sk->sk_state != TCP_SYN_RECV)
                chtls_send_abort(sk, mode, skb);
        else
-               goto out;
+               chtls_set_tcb_field_rpl_skb(sk, TCB_T_FLAGS_W,
+                                           TCB_T_FLAGS_V(TCB_T_FLAGS_M), 0,
+                                           TCB_FIELD_COOKIE_TFLAG, 1);
 
        return;
 out:
@@ -621,7 +624,7 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx)
 
        while (!skb_queue_empty(&listen_ctx->synq)) {
                struct chtls_sock *csk =
-                       container_of((struct synq *)__skb_dequeue
+                       container_of((struct synq *)skb_peek
                                (&listen_ctx->synq), struct chtls_sock, synq);
                struct sock *child = csk->sk;
 
@@ -1109,6 +1112,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
                                    const struct cpl_pass_accept_req *req,
                                    struct chtls_dev *cdev)
 {
+       struct adapter *adap = pci_get_drvdata(cdev->pdev);
        struct neighbour *n = NULL;
        struct inet_sock *newinet;
        const struct iphdr *iph;
@@ -1118,9 +1122,10 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
        struct dst_entry *dst;
        struct tcp_sock *tp;
        struct sock *newsk;
+       bool found = false;
        u16 port_id;
        int rxq_idx;
-       int step;
+       int step, i;
 
        iph = (const struct iphdr *)network_hdr;
        newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
@@ -1152,7 +1157,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
                n = dst_neigh_lookup(dst, &ip6h->saddr);
 #endif
        }
-       if (!n)
+       if (!n || !n->dev)
                goto free_sk;
 
        ndev = n->dev;
@@ -1161,6 +1166,13 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
        if (is_vlan_dev(ndev))
                ndev = vlan_dev_real_dev(ndev);
 
+       for_each_port(adap, i)
+               if (cdev->ports[i] == ndev)
+                       found = true;
+
+       if (!found)
+               goto free_dst;
+
        port_id = cxgb4_port_idx(ndev);
 
        csk = chtls_sock_create(cdev);
@@ -1238,6 +1250,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 free_csk:
        chtls_sock_release(&csk->kref);
 free_dst:
+       neigh_release(n);
        dst_release(dst);
 free_sk:
        inet_csk_prepare_forced_close(newsk);
@@ -1387,7 +1400,7 @@ static void chtls_pass_accept_request(struct sock *sk,
 
        newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
        if (!newsk)
-               goto free_oreq;
+               goto reject;
 
        if (chtls_get_module(newsk))
                goto reject;
@@ -1403,8 +1416,6 @@ static void chtls_pass_accept_request(struct sock *sk,
        kfree_skb(skb);
        return;
 
-free_oreq:
-       chtls_reqsk_free(oreq);
 reject:
        mk_tid_release(reply_skb, 0, tid);
        cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
@@ -1589,6 +1600,11 @@ static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
                        sk_wake_async(sk, 0, POLL_OUT);
 
                data = lookup_stid(cdev->tids, stid);
+               if (!data) {
+                       /* listening server close */
+                       kfree_skb(skb);
+                       goto unlock;
+               }
                lsk = ((struct listen_ctx *)data)->lsk;
 
                bh_lock_sock(lsk);
@@ -1936,6 +1952,8 @@ static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
                else if (tcp_sk(sk)->linger2 < 0 &&
                         !csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN))
                        chtls_abort_conn(sk, skb);
+               else if (csk_flag_nochk(csk, CSK_TX_DATA_SENT))
+                       chtls_set_quiesce_ctrl(sk, 0);
                break;
        default:
                pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
@@ -1997,39 +2015,6 @@ static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
        spin_unlock_bh(&cdev->deferq.lock);
 }
 
-static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
-                          struct chtls_dev *cdev, int status, int queue)
-{
-       struct cpl_abort_req_rss *req = cplhdr(skb);
-       struct sk_buff *reply_skb;
-       struct chtls_sock *csk;
-
-       csk = rcu_dereference_sk_user_data(sk);
-
-       reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
-                             GFP_KERNEL);
-
-       if (!reply_skb) {
-               req->status = (queue << 1);
-               t4_defer_reply(skb, cdev, send_defer_abort_rpl);
-               return;
-       }
-
-       set_abort_rpl_wr(reply_skb, GET_TID(req), status);
-       kfree_skb(skb);
-
-       set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
-       if (csk_conn_inline(csk)) {
-               struct l2t_entry *e = csk->l2t_entry;
-
-               if (e && sk->sk_state != TCP_SYN_RECV) {
-                       cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
-                       return;
-               }
-       }
-       cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
-}
-
 static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
                                 struct chtls_dev *cdev,
                                 int status, int queue)
@@ -2078,9 +2063,9 @@ static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
        queue = csk->txq_idx;
 
        skb->sk = NULL;
+       chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
+                            CPL_ABORT_NO_RST, queue);
        do_abort_syn_rcv(child, lsk);
-       send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
-                      CPL_ABORT_NO_RST, queue);
 }
 
 static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
@@ -2110,8 +2095,8 @@ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
        if (!sock_owned_by_user(psk)) {
                int queue = csk->txq_idx;
 
+               chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
                do_abort_syn_rcv(sk, psk);
-               send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
        } else {
                skb->sk = sk;
                BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
@@ -2129,9 +2114,6 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
        int queue = csk->txq_idx;
 
        if (is_neg_adv(req->status)) {
-               if (sk->sk_state == TCP_SYN_RECV)
-                       chtls_set_tcb_tflag(sk, 0, 0);
-
                kfree_skb(skb);
                return;
        }
@@ -2158,12 +2140,12 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
                if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
                        return;
 
-               chtls_release_resources(sk);
-               chtls_conn_done(sk);
        }
 
        chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev,
                             rst_status, queue);
+       chtls_release_resources(sk);
+       chtls_conn_done(sk);
 }
 
 static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
@@ -2315,6 +2297,28 @@ static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb)
        return 0;
 }
 
+static int chtls_set_tcb_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+       struct cpl_set_tcb_rpl *rpl = cplhdr(skb) + RSS_HDR;
+       unsigned int hwtid = GET_TID(rpl);
+       struct sock *sk;
+
+       sk = lookup_tid(cdev->tids, hwtid);
+
+       /* return -EINVAL if the socket doesn't exist */
+       if (!sk)
+               return -EINVAL;
+
+       /* Reuse the skb, as the cpl_set_tcb_field structure is
+        * larger than cpl_abort_req
+        */
+       if (TCB_COOKIE_G(rpl->cookie) == TCB_FIELD_COOKIE_TFLAG)
+               chtls_send_abort(sk, CPL_ABORT_SEND_RST, NULL);
+
+       kfree_skb(skb);
+       return 0;
+}
+
 chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
        [CPL_PASS_OPEN_RPL]     = chtls_pass_open_rpl,
        [CPL_CLOSE_LISTSRV_RPL] = chtls_close_listsrv_rpl,
@@ -2327,5 +2331,6 @@ chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
        [CPL_CLOSE_CON_RPL]     = chtls_conn_cpl,
        [CPL_ABORT_REQ_RSS]     = chtls_conn_cpl,
        [CPL_ABORT_RPL_RSS]     = chtls_conn_cpl,
-       [CPL_FW4_ACK]           = chtls_wr_ack,
+       [CPL_FW4_ACK]           = chtls_wr_ack,
+       [CPL_SET_TCB_RPL]       = chtls_set_tcb_rpl,
 };
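
The new CPL_SET_TCB_RPL entry closes the loop on the cookie-stamped TCB write issued from chtls_send_reset() earlier in this patch: the request carries TCB_FIELD_COOKIE_TFLAG, and only when that cookie comes back in the reply does the driver send the RST. Condensed from the hunks above:

	/* request side, in chtls_send_reset(): clear T_FLAGS, stamp cookie */
	chtls_set_tcb_field_rpl_skb(sk, TCB_T_FLAGS_W,
				    TCB_T_FLAGS_V(TCB_T_FLAGS_M), 0,
				    TCB_FIELD_COOKIE_TFLAG, 1);

	/* reply side, in chtls_set_tcb_rpl(): act only on matching cookie */
	if (TCB_COOKIE_G(rpl->cookie) == TCB_FIELD_COOKIE_TFLAG)
		chtls_send_abort(sk, CPL_ABORT_SEND_RST, NULL);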
index a4fb463af22ac335c425532787f4b8a7bb91e11d..1e67140b0f8013633566ca244c4a5d04b4869251 100644 (file)
@@ -88,6 +88,24 @@ static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
        return ret < 0 ? ret : 0;
 }
 
+void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
+                                u64 mask, u64 val, u8 cookie,
+                                int through_l2t)
+{
+       struct sk_buff *skb;
+       unsigned int wrlen;
+
+       wrlen = sizeof(struct cpl_set_tcb_field) + sizeof(struct ulptx_idata);
+       wrlen = roundup(wrlen, 16);
+
+       skb = alloc_skb(wrlen, GFP_KERNEL | __GFP_NOFAIL);
+       if (!skb)
+               return;
+
+       __set_tcb_field(sk, skb, word, mask, val, cookie, 0);
+       send_or_defer(sk, tcp_sk(sk), skb, through_l2t);
+}
+
 /*
  * Set one of the t_flags bits in the TCB.
  */
@@ -113,6 +131,29 @@ static int chtls_set_tcb_quiesce(struct sock *sk, int val)
                                   TF_RX_QUIESCE_V(val));
 }
 
+void chtls_set_quiesce_ctrl(struct sock *sk, int val)
+{
+       struct chtls_sock *csk;
+       struct sk_buff *skb;
+       unsigned int wrlen;
+       int ret;
+
+       wrlen = sizeof(struct cpl_set_tcb_field) + sizeof(struct ulptx_idata);
+       wrlen = roundup(wrlen, 16);
+
+       skb = alloc_skb(wrlen, GFP_ATOMIC);
+       if (!skb)
+               return;
+
+       csk = rcu_dereference_sk_user_data(sk);
+
+       __set_tcb_field(sk, skb, 1, TF_RX_QUIESCE_V(1), 0, 0, 1);
+       set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
+       ret = cxgb4_ofld_send(csk->egress_dev, skb);
+       if (ret < 0)
+               kfree_skb(skb);
+}
+
 /* TLS Key bitmap processing */
 int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi)
 {
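In chtls_set_tcb_field_rpl_skb() above, the work-request length is the CPL header plus an immediate-data descriptor, rounded up to a 16-byte multiple as Chelsio work requests require; note also that with __GFP_NOFAIL the allocation cannot return NULL, so the NULL check is defensive only. A standalone model of the round-up arithmetic (the constant 16 matches the code above; the sample lengths are illustrative):

    #include <assert.h>

    /* Same arithmetic as the kernel's roundup(x, y) for y > 0 */
    #define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
            assert(ROUNDUP(20, 16) == 32);  /* header + immediate data */
            assert(ROUNDUP(16, 16) == 16);  /* already aligned */
            assert(ROUNDUP(17, 16) == 32);
            return 0;
    }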
index 0981fe9652e501e3567b81d99214fab1051406cb..3d9b0b161e241cb5618edffba92466737f52eddb 100644 (file)
@@ -1211,7 +1211,7 @@ static int ethoc_probe(struct platform_device *pdev)
        ret = mdiobus_register(priv->mdio);
        if (ret) {
                dev_err(&netdev->dev, "failed to register MDIO bus\n");
-               goto free2;
+               goto free3;
        }
 
        ret = ethoc_mdio_probe(netdev);
@@ -1243,6 +1243,7 @@ error2:
        netif_napi_del(&priv->napi);
 error:
        mdiobus_unregister(priv->mdio);
+free3:
        mdiobus_free(priv->mdio);
 free2:
        clk_disable_unprepare(priv->clk);
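The new free3 label fixes an unwind-order bug: when mdiobus_register() fails, the bus was never registered, so jumping to the old label would have called mdiobus_unregister() on it. The fix follows the usual probe pattern of error labels that undo, in reverse order, only the steps that have succeeded so far. A self-contained sketch of that pattern (step names are illustrative):

    #include <stdio.h>

    static int acquire_a(void)  { puts("a acquired"); return 0; }
    static int acquire_b(void)  { puts("b acquired"); return -1; } /* simulated failure */
    static void release_a(void) { puts("a released"); }

    static int probe(void)
    {
            int err;

            err = acquire_a();
            if (err)
                    goto out;

            err = acquire_b();
            if (err)
                    goto err_a;     /* undo only step a; b never succeeded */

            return 0;

    err_a:
            release_a();
    out:
            return err;
    }

    int main(void) { return probe() ? 1 : 0; }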
index c8e5d889bd81fb73c1289975ddf0d5686d236162..21de56345503f6cc82763cb42d3518f09b4fe639 100644 (file)
@@ -223,3 +223,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
 };
 
 module_platform_driver(fs_enet_bb_mdio_driver);
+MODULE_LICENSE("GPL");
index 8b51ee142fa3c3c113231a42b81dc2ad488726a0..152f4d83765aad7df1d9eefa2fbbce1b34d9a005 100644 (file)
@@ -224,3 +224,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
 };
 
 module_platform_driver(fs_enet_fec_mdio_driver);
+MODULE_LICENSE("GPL");
index ba8869c3d891c0e05e29cd4fb1bcb5b17ce775ba..6d853f018d53133d47fa5a6236174a64a1f5f370 100644 (file)
@@ -3889,6 +3889,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
        INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
        netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
        dev->mtu = 1500;
+       dev->max_mtu = 1518;
 
        ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
        ugeth->phy_interface = phy_interface;
@@ -3934,12 +3935,12 @@ static int ucc_geth_remove(struct platform_device* ofdev)
        struct device_node *np = ofdev->dev.of_node;
 
        unregister_netdev(dev);
-       free_netdev(dev);
        ucc_geth_memclean(ugeth);
        if (of_phy_is_fixed_link(np))
                of_phy_deregister_fixed_link(np);
        of_node_put(ugeth->ug_info->tbi_node);
        of_node_put(ugeth->ug_info->phy_node);
+       free_netdev(dev);
 
        return 0;
 }
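Moving free_netdev() to the end is a use-after-free fix: ugeth is the netdev's private area, valid only while the netdev allocation is alive, and the of_node_put() calls above still dereference it. A minimal model of the rule, release the containing allocation only after its embedded state is no longer used:

    #include <stdlib.h>
    #include <string.h>

    struct priv   { char name[16]; };
    struct netdev { int id; struct priv p; };   /* priv embedded in netdev */

    int main(void)
    {
            struct netdev *dev = malloc(sizeof(*dev));
            struct priv *priv;

            if (!dev)
                    return 1;
            priv = &dev->p;
            strcpy(priv->name, "ugeth");

            /* All uses of priv must happen here, before the free ... */
            priv->name[0] = 'U';

            free(dev);      /* ... because this invalidates priv too */
            return 0;
    }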
index 1a9bdf66a7d81214f5e15fbed436a204597b59cc..11d4bf5dc21f73e2a6d4b34cef723474eb24899f 100644 (file)
@@ -575,7 +575,14 @@ struct ucc_geth_tx_global_pram {
        u32 vtagtable[0x8];     /* 8 4-byte VLAN tags */
        u32 tqptr;              /* a base pointer to the Tx Queues Memory
                                   Region */
-       u8 res2[0x80 - 0x74];
+       u8 res2[0x78 - 0x74];
+       u64 snums_en;
+       u32 l2l3baseptr;        /* top byte consists of a few other bit fields */
+
+       u16 mtu[8];
+       u8 res3[0xa8 - 0x94];
+       u32 wrrtablebase;       /* top byte is reserved */
+       u8 res4[0xc0 - 0xac];
 } __packed;
 
 /* structure representing Extended Filtering Global Parameters in PRAM */
index 7165da0ee9aa50370ab02484344fb9a0daf89692..a6e3f07caf99c323c983227f9e7170b959830f4e 100644 (file)
@@ -415,6 +415,10 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
        /* for multi buffer */
        new_skb = skb_copy(skb, GFP_ATOMIC);
        dev_kfree_skb_any(skb);
+       if (!new_skb) {
+               netdev_err(ndev, "skb alloc failed\n");
+               return;
+       }
        skb = new_skb;
 
        check_ok = 0;
index fb5e8842983c2fa5da6c1358d8d4b41e5aa7945a..33defa4c180ae58031345881f2fbf639dfcd359a 100644 (file)
@@ -169,7 +169,7 @@ struct hclgevf_mbx_arq_ring {
 #define hclge_mbx_ring_ptr_move_crq(crq) \
        (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
 #define hclge_mbx_tail_ptr_move_arq(arq) \
-       (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+               (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
 #define hclge_mbx_head_ptr_move_arq(arq) \
-               (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+               (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
 #endif
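The fix above changes the wrap modulus for the ARQ head and tail from the size of one message to the number of ring slots; with the wrong constant the indices could run past the ring. A standalone model of the ring-index advance:

    #include <assert.h>

    #define RING_SLOTS 8    /* plays the role of ..._MSG_NUM */

    static unsigned int advance(unsigned int idx)
    {
            return (idx + 1) % RING_SLOTS;  /* modulus = slot count */
    }

    int main(void)
    {
            assert(advance(0) == 1);
            assert(advance(RING_SLOTS - 1) == 0);   /* wraps to slot 0 */
            return 0;
    }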
index e6f37f91c489d1aa218ae124d7835b4d558875dc..c242883fea5db8af20ac10d629b2d7350990c33a 100644 (file)
@@ -752,7 +752,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
                handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
                handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
 
-               if (hdev->hw.mac.phydev) {
+               if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
+                   hdev->hw.mac.phydev->drv->set_loopback) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
                }
@@ -4537,8 +4538,8 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
                req->ipv4_sctp_en = tuple_sets;
                break;
        case SCTP_V6_FLOW:
-               if ((nfc->data & RXH_L4_B_0_1) ||
-                   (nfc->data & RXH_L4_B_2_3))
+               if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+                   (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
                        return -EINVAL;
 
                req->ipv6_sctp_en = tuple_sets;
@@ -4730,6 +4731,8 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
                vport[i].rss_tuple_sets.ipv6_udp_en =
                        HCLGE_RSS_INPUT_TUPLE_OTHER;
                vport[i].rss_tuple_sets.ipv6_sctp_en =
+                       hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+                       HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
                        HCLGE_RSS_INPUT_TUPLE_SCTP;
                vport[i].rss_tuple_sets.ipv6_fragment_en =
                        HCLGE_RSS_INPUT_TUPLE_OTHER;
index 50a294dfaff50c13ef62272882ae6d5b1ecf3e9f..ca46bc9110d7d7d1f85358e8d4b2a6690af18ccb 100644 (file)
 #define HCLGE_D_IP_BIT                 BIT(2)
 #define HCLGE_S_IP_BIT                 BIT(3)
 #define HCLGE_V_TAG_BIT                        BIT(4)
+#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT     \
+               (HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT)
 
 #define HCLGE_RSS_TC_SIZE_0            1
 #define HCLGE_RSS_TC_SIZE_1            2
index 145757cb70f9f0bcdae2a8d67210c6a6d6b82ea1..674b3a22e91fe13da9670be73b34b64ca5c74493 100644 (file)
@@ -917,8 +917,8 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
                req->ipv4_sctp_en = tuple_sets;
                break;
        case SCTP_V6_FLOW:
-               if ((nfc->data & RXH_L4_B_0_1) ||
-                   (nfc->data & RXH_L4_B_2_3))
+               if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+                   (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
                        return -EINVAL;
 
                req->ipv6_sctp_en = tuple_sets;
@@ -2502,7 +2502,10 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
                tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
                tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
                tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-               tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+               tuple_sets->ipv6_sctp_en =
+                       hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+                                       HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
+                                       HCLGEVF_RSS_INPUT_TUPLE_SCTP;
                tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
        }
 
index 1b183bc35604b4d780ef439e33ddee5c818e1f73..f6d817a3edcb35f72e0c7b3ace40fc8cefab8bc8 100644 (file)
 #define HCLGEVF_D_IP_BIT               BIT(2)
 #define HCLGEVF_S_IP_BIT               BIT(3)
 #define HCLGEVF_V_TAG_BIT              BIT(4)
+#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT   \
+       (HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
 
 #define HCLGEVF_STATS_TIMER_INTERVAL   36U
 
index f302504faa8a58c844915584f6562b41c50ca093..9778c83150f1cc4508864d593bb5d33487c416db 100644 (file)
@@ -955,6 +955,7 @@ static void release_resources(struct ibmvnic_adapter *adapter)
        release_rx_pools(adapter);
 
        release_napi(adapter);
+       release_login_buffer(adapter);
        release_login_rsp_buffer(adapter);
 }
 
@@ -2341,8 +2342,7 @@ static void __ibmvnic_reset(struct work_struct *work)
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                schedule_timeout(60 * HZ);
                        }
-               } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
-                               adapter->from_passive_init)) {
+               } else {
                        rc = do_reset(adapter, rwi, reset_state);
                }
                kfree(rwi);
@@ -2981,9 +2981,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
        int rc;
 
        if (!scrq) {
-               netdev_dbg(adapter->netdev,
-                          "Invalid scrq reset. irq (%d) or msgs (%p).\n",
-                          scrq->irq, scrq->msgs);
+               netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
                return -EINVAL;
        }
 
@@ -3873,7 +3871,9 @@ static int send_login(struct ibmvnic_adapter *adapter)
                return -1;
        }
 
+       release_login_buffer(adapter);
        release_login_rsp_buffer(adapter);
+
        client_data_len = vnic_client_data_len(adapter);
 
        buffer_size =
index ba7a0f8f693763e74ddd2582d07716e50c2f8f28..5b2143f4b1f85fada39e3509508d7ed31be46abb 100644 (file)
@@ -436,6 +436,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
 #define FLAG2_DFLT_CRC_STRIPPING          BIT(12)
 #define FLAG2_CHECK_RX_HWTSTAMP           BIT(13)
 #define FLAG2_CHECK_SYSTIM_OVERFLOW       BIT(14)
+#define FLAG2_ENABLE_S0IX_FLOWS           BIT(15)
 
 #define E1000_RX_DESC_PS(R, i)     \
        (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
index 03215b0aee4bd349cab3793d1e38282c78e9cece..06442e6bef7310e41852bdb727d1b2c3c583a050 100644 (file)
@@ -23,6 +23,13 @@ struct e1000_stats {
        int stat_offset;
 };
 
+static const char e1000e_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define E1000E_PRIV_FLAGS_S0IX_ENABLED BIT(0)
+       "s0ix-enabled",
+};
+
+#define E1000E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(e1000e_priv_flags_strings)
+
 #define E1000_STAT(str, m) { \
                .stat_string = str, \
                .type = E1000_STATS, \
@@ -1776,6 +1783,8 @@ static int e1000e_get_sset_count(struct net_device __always_unused *netdev,
                return E1000_TEST_LEN;
        case ETH_SS_STATS:
                return E1000_STATS_LEN;
+       case ETH_SS_PRIV_FLAGS:
+               return E1000E_PRIV_FLAGS_STR_LEN;
        default:
                return -EOPNOTSUPP;
        }
@@ -2097,6 +2106,10 @@ static void e1000_get_strings(struct net_device __always_unused *netdev,
                        p += ETH_GSTRING_LEN;
                }
                break;
+       case ETH_SS_PRIV_FLAGS:
+               memcpy(data, e1000e_priv_flags_strings,
+                      E1000E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+               break;
        }
 }
 
@@ -2305,6 +2318,37 @@ static int e1000e_get_ts_info(struct net_device *netdev,
        return 0;
 }
 
+static u32 e1000e_get_priv_flags(struct net_device *netdev)
+{
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+       u32 priv_flags = 0;
+
+       if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+               priv_flags |= E1000E_PRIV_FLAGS_S0IX_ENABLED;
+
+       return priv_flags;
+}
+
+static int e1000e_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+       unsigned int flags2 = adapter->flags2;
+
+       flags2 &= ~FLAG2_ENABLE_S0IX_FLOWS;
+       if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) {
+               struct e1000_hw *hw = &adapter->hw;
+
+               if (hw->mac.type < e1000_pch_cnp)
+                       return -EINVAL;
+               flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
+       }
+
+       if (flags2 != adapter->flags2)
+               adapter->flags2 = flags2;
+
+       return 0;
+}
+
 static const struct ethtool_ops e1000_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
        .get_drvinfo            = e1000_get_drvinfo,
@@ -2336,6 +2380,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
        .set_eee                = e1000e_set_eee,
        .get_link_ksettings     = e1000_get_link_ksettings,
        .set_link_ksettings     = e1000_set_link_ksettings,
+       .get_priv_flags         = e1000e_get_priv_flags,
+       .set_priv_flags         = e1000e_set_priv_flags,
 };
 
 void e1000e_set_ethtool_ops(struct net_device *netdev)
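The additions in this file wire a new "s0ix-enabled" private flag into ethtool: the string table, the count and names reported via get_sset_count()/get_strings(), and get/set callbacks that translate between the FLAG2 bit and the exported bitmask, rejecting the flag on pre-CNP hardware. A userspace model of that get/set bitmask pattern (names and the hardware check are illustrative):

    #include <assert.h>
    #include <errno.h>

    #define FLAG_S0IX (1u << 0)

    static unsigned int driver_flags;
    static int hw_supports_s0ix = 1;    /* stand-in for the MAC-type check */

    static unsigned int get_priv_flags(void)
    {
            return driver_flags & FLAG_S0IX;
    }

    static int set_priv_flags(unsigned int flags)
    {
            if ((flags & FLAG_S0IX) && !hw_supports_s0ix)
                    return -EINVAL;     /* unsupported on this hardware */
            driver_flags = (driver_flags & ~FLAG_S0IX) | (flags & FLAG_S0IX);
            return 0;
    }

    int main(void)
    {
            assert(set_priv_flags(FLAG_S0IX) == 0);
            assert(get_priv_flags() == FLAG_S0IX);
            assert(set_priv_flags(0) == 0);
            assert(get_priv_flags() == 0);
            return 0;
    }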
index 9aa6fad8ed47723c0f04b634a4429fd1f8713a59..6fb46682b058a297c701ab3c14c8dde7a9f670a9 100644 (file)
@@ -1240,6 +1240,9 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
                return 0;
 
        if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+               struct e1000_adapter *adapter = hw->adapter;
+               bool firmware_bug = false;
+
                if (force) {
                        /* Request ME un-configure ULP mode in the PHY */
                        mac_reg = er32(H2ME);
@@ -1248,16 +1251,24 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
                        ew32(H2ME, mac_reg);
                }
 
-               /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
+               /* Poll up to 2.5 seconds for ME to clear ULP_CFG_DONE.
+                * If this takes more than 1 second, show a warning indicating a
+                * firmware bug
+                */
                while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
-                       if (i++ == 30) {
+                       if (i++ == 250) {
                                ret_val = -E1000_ERR_PHY;
                                goto out;
                        }
+                       if (i > 100 && !firmware_bug)
+                               firmware_bug = true;
 
                        usleep_range(10000, 11000);
                }
-               e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+               if (firmware_bug)
+                       e_warn("ULP_CONFIG_DONE took %dmsec.  This is a firmware bug\n", i * 10);
+               else
+                       e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
 
                if (force) {
                        mac_reg = er32(H2ME);
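The polling change stretches the hard timeout from 300 ms to 2.5 s (250 iterations of ~10 ms) and adds a soft 1-second threshold that, once crossed, turns the success message into a firmware-bug warning. A standalone sketch of that two-threshold polling loop (the simulated clear time is arbitrary):

    #include <stdio.h>

    /* Stand-in for the hardware bit; clears after ~1.2 s here */
    static int cfg_done_clear(int tick) { return tick >= 120; }

    int main(void)
    {
            int i = 0, slow = 0;

            while (!cfg_done_clear(i)) {
                    if (i++ == 250) {          /* hard limit: ~2.5 s */
                            fprintf(stderr, "timed out\n");
                            return 1;
                    }
                    if (i > 100 && !slow)      /* soft limit: ~1 s */
                            slow = 1;
                    /* usleep_range(10000, 11000) in the driver */
            }

            if (slow)
                    printf("cleared after %d ms; unusually slow\n", i * 10);
            else
                    printf("cleared after %d ms\n", i * 10);
            return 0;
    }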
index 128ab6898070e0262e0756fb6ef6c9209ab3577c..e9b82c209c2df60534a2eef83fe5d8e230846554 100644 (file)
@@ -103,45 +103,6 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
        {0, NULL}
 };
 
-struct e1000e_me_supported {
-       u16 device_id;          /* supported device ID */
-};
-
-static const struct e1000e_me_supported me_supported[] = {
-       {E1000_DEV_ID_PCH_LPT_I217_LM},
-       {E1000_DEV_ID_PCH_LPTLP_I218_LM},
-       {E1000_DEV_ID_PCH_I218_LM2},
-       {E1000_DEV_ID_PCH_I218_LM3},
-       {E1000_DEV_ID_PCH_SPT_I219_LM},
-       {E1000_DEV_ID_PCH_SPT_I219_LM2},
-       {E1000_DEV_ID_PCH_LBG_I219_LM3},
-       {E1000_DEV_ID_PCH_SPT_I219_LM4},
-       {E1000_DEV_ID_PCH_SPT_I219_LM5},
-       {E1000_DEV_ID_PCH_CNP_I219_LM6},
-       {E1000_DEV_ID_PCH_CNP_I219_LM7},
-       {E1000_DEV_ID_PCH_ICP_I219_LM8},
-       {E1000_DEV_ID_PCH_ICP_I219_LM9},
-       {E1000_DEV_ID_PCH_CMP_I219_LM10},
-       {E1000_DEV_ID_PCH_CMP_I219_LM11},
-       {E1000_DEV_ID_PCH_CMP_I219_LM12},
-       {E1000_DEV_ID_PCH_TGP_I219_LM13},
-       {E1000_DEV_ID_PCH_TGP_I219_LM14},
-       {E1000_DEV_ID_PCH_TGP_I219_LM15},
-       {0}
-};
-
-static bool e1000e_check_me(u16 device_id)
-{
-       struct e1000e_me_supported *id;
-
-       for (id = (struct e1000e_me_supported *)me_supported;
-            id->device_id; id++)
-               if (device_id == id->device_id)
-                       return true;
-
-       return false;
-}
-
 /**
  * __ew32_prepare - prepare to write to MAC CSR register on certain parts
  * @hw: pointer to the HW structure
@@ -6962,7 +6923,6 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
        struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct pci_dev *pdev = to_pci_dev(dev);
-       struct e1000_hw *hw = &adapter->hw;
        int rc;
 
        e1000e_flush_lpic(pdev);
@@ -6970,13 +6930,13 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
        e1000e_pm_freeze(dev);
 
        rc = __e1000_shutdown(pdev, false);
-       if (rc)
+       if (rc) {
                e1000e_pm_thaw(dev);
-
-       /* Introduce S0ix implementation */
-       if (hw->mac.type >= e1000_pch_cnp &&
-           !e1000e_check_me(hw->adapter->pdev->device))
-               e1000e_s0ix_entry_flow(adapter);
+       } else {
+               /* Introduce S0ix implementation */
+               if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+                       e1000e_s0ix_entry_flow(adapter);
+       }
 
        return rc;
 }
@@ -6986,12 +6946,10 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
        struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct pci_dev *pdev = to_pci_dev(dev);
-       struct e1000_hw *hw = &adapter->hw;
        int rc;
 
        /* Introduce S0ix implementation */
-       if (hw->mac.type >= e1000_pch_cnp &&
-           !e1000e_check_me(hw->adapter->pdev->device))
+       if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
                e1000e_s0ix_exit_flow(adapter);
 
        rc = __e1000_resume(pdev);
@@ -7655,6 +7613,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!(adapter->flags & FLAG_HAS_AMT))
                e1000e_get_hw_control(adapter);
 
+       if (hw->mac.type >= e1000_pch_cnp)
+               adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
+
        strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
        err = register_netdev(netdev);
        if (err)
index d231a2cdd98ff244acb0c796dea11c84767e1404..118473dfdcbd248b0472da7f32ae31dffcc5e1d5 100644 (file)
@@ -120,6 +120,7 @@ enum i40e_state_t {
        __I40E_RESET_INTR_RECEIVED,
        __I40E_REINIT_REQUESTED,
        __I40E_PF_RESET_REQUESTED,
+       __I40E_PF_RESET_AND_REBUILD_REQUESTED,
        __I40E_CORE_RESET_REQUESTED,
        __I40E_GLOBAL_RESET_REQUESTED,
        __I40E_EMP_RESET_INTR_RECEIVED,
@@ -146,6 +147,8 @@ enum i40e_state_t {
 };
 
 #define I40E_PF_RESET_FLAG     BIT_ULL(__I40E_PF_RESET_REQUESTED)
+#define I40E_PF_RESET_AND_REBUILD_FLAG \
+       BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
 
 /* VSI state flags */
 enum i40e_vsi_state_t {
index 1337686bd0998003e1a92a56f03ed7d8d8a99a3e..1db482d310c2d5ad1ca978ec7d47ee8b4f77221b 100644 (file)
@@ -36,6 +36,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf);
 static void i40e_determine_queue_usage(struct i40e_pf *pf);
 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+                                  bool lock_acquired);
 static int i40e_reset(struct i40e_pf *pf);
 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
@@ -8536,6 +8538,14 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
                         "FW LLDP is disabled\n" :
                         "FW LLDP is enabled\n");
 
+       } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
+               /* Request a PF Reset
+                *
+                * Resets the PF and reinitializes the PF's VSI.
+                */
+               i40e_prep_for_reset(pf, lock_acquired);
+               i40e_reset_and_rebuild(pf, true, lock_acquired);
+
        } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
                int v;
 
index 729c4f0d5ac5299a10b9862d7143d724b6eb88c0..21ee56420c3aee60f1428ebf2566611addd34c04 100644 (file)
@@ -1772,7 +1772,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
        if (num_vfs) {
                if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
                        pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
-                       i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+                       i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
                }
                ret = i40e_pci_sriov_enable(pdev, num_vfs);
                goto sriov_configure_out;
@@ -1781,7 +1781,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
        if (!pci_vfs_assigned(pf->pdev)) {
                i40e_free_vfs(pf);
                pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
-               i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+               i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
        } else {
                dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
                ret = -EINVAL;
index 47eb9c584a123ae92af5f6a8a36cf1be66296832..492ce213208d2a1d91d95a5ad7163a16af7ec668 100644 (file)
@@ -348,12 +348,12 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
                 * SBP is *not* set in PRT_SBPVSI (default not set).
                 */
                skb = i40e_construct_skb_zc(rx_ring, *bi);
-               *bi = NULL;
                if (!skb) {
                        rx_ring->rx_stats.alloc_buff_failed++;
                        break;
                }
 
+               *bi = NULL;
                cleaned_count++;
                i40e_inc_ntc(rx_ring);
 
index 95543dfd4fe77c73023fa9b96b62de476fd7c909..0a867d64d46753ae4f065ce479b4f4a139f38067 100644 (file)
@@ -1834,11 +1834,9 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
        netif_tx_stop_all_queues(netdev);
        if (CLIENT_ALLOWED(adapter)) {
                err = iavf_lan_add_device(adapter);
-               if (err) {
-                       rtnl_unlock();
+               if (err)
                        dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
                                 err);
-               }
        }
        dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
        if (netdev->features & NETIF_F_GRO)
index 563ceac3060f9364333f62229c388660f00bdefa..bc4d8d14440199b5d3d548204d010927ae1802d6 100644 (file)
@@ -4432,7 +4432,7 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
        struct bpf_prog *old_prog;
 
        if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
-               NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
+               NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
                return -EOPNOTSUPP;
        }
 
@@ -5255,7 +5255,7 @@ static int mvneta_probe(struct platform_device *pdev)
        err = mvneta_port_power_up(pp, pp->phy_interface);
        if (err < 0) {
                dev_err(&pdev->dev, "can't power up port\n");
-               return err;
+               goto err_netdev;
        }
 
        /* Armada3700 network controller does not support per-cpu
index afdd22827223bb7bb7f787ca2d5a17ffcdeffaaf..358119d98358200130bbe76d6507d1fe52a334a8 100644 (file)
@@ -1231,7 +1231,7 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
 
        regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
        if (port->gop_id == 2)
-               val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
+               val |= GENCONF_CTRL0_PORT0_RGMII;
        else if (port->gop_id == 3)
                val |= GENCONF_CTRL0_PORT1_RGMII_MII;
        regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
@@ -2370,17 +2370,18 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
                                   struct mvpp2_tx_queue *txq)
 {
-       unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+       unsigned int thread;
        u32 val;
 
        if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
                txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
 
        val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
-       mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
-       mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
-
-       put_cpu();
+       /* PKT-coalescing registers are per-queue + per-thread */
+       for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
+               mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+               mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
+       }
 }
 
 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
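The coalescing fix replaces a single write on the current CPU's thread with a loop over all hardware threads, since the TXQ threshold registers are banked per queue and per thread. A minimal model of programming one value into every per-thread register bank:

    #include <stdio.h>

    #define MAX_THREADS 2   /* number of per-thread register banks */

    static unsigned int thresh_reg[MAX_THREADS];

    static void set_tx_coalesce(unsigned int val)
    {
            for (unsigned int t = 0; t < MAX_THREADS; t++)
                    thresh_reg[t] = val;    /* one write per thread bank */
    }

    int main(void)
    {
            set_tx_coalesce(32);
            for (unsigned int t = 0; t < MAX_THREADS; t++)
                    printf("thread %u: threshold %u\n", t, thresh_reg[t]);
            return 0;
    }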
@@ -5487,7 +5488,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
        struct mvpp2 *priv = port->priv;
        struct mvpp2_txq_pcpu *txq_pcpu;
        unsigned int thread;
-       int queue, err;
+       int queue, err, val;
 
        /* Checks for hardware constraints */
        if (port->first_rxq + port->nrxqs >
@@ -5501,6 +5502,18 @@ static int mvpp2_port_init(struct mvpp2_port *port)
        mvpp2_egress_disable(port);
        mvpp2_port_disable(port);
 
+       if (mvpp2_is_xlg(port->phy_interface)) {
+               val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+               val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
+               val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
+               writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+       } else {
+               val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+               val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+               val |= MVPP2_GMAC_FORCE_LINK_DOWN;
+               writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+       }
+
        port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
 
        port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
@@ -5869,8 +5882,6 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
 
        phylink_set(mask, Autoneg);
        phylink_set_port_modes(mask);
-       phylink_set(mask, Pause);
-       phylink_set(mask, Asym_Pause);
 
        switch (state->interface) {
        case PHY_INTERFACE_MODE_10GBASER:
index 5692c6087bbb0781ef473ea5dfe8f6148c4ae4f7..a30eb90ba3d28a04e57a6a822745bfe232144f48 100644 (file)
@@ -405,6 +405,38 @@ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
        return -EINVAL;
 }
 
+/* Drop flow control pause frames */
+static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
+{
+       unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
+       struct mvpp2_prs_entry pe;
+       unsigned int len;
+
+       memset(&pe, 0, sizeof(pe));
+
+       /* For all ports - drop flow control frames */
+       pe.index = MVPP2_PE_FC_DROP;
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+       /* Set match on DA */
+       len = ETH_ALEN;
+       while (len--)
+               mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
+
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+                                MVPP2_PRS_RI_DROP_MASK);
+
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+       /* Mask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+       mvpp2_prs_hw_write(priv, &pe);
+}
+
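mvpp2_prs_drop_fc() installs a parser TCAM entry that matches the fixed IEEE 802.3x pause-frame destination address 01:80:C2:00:00:01 byte by byte on all ports and marks matching frames for drop. A plain-C model of the byte-wise DA match the entry encodes:

    #include <assert.h>
    #include <string.h>

    #define ETH_ALEN 6

    static const unsigned char pause_da[ETH_ALEN] =
            { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };

    static int is_pause_frame(const unsigned char *da)
    {
            /* The TCAM matches each byte with mask 0xff */
            return memcmp(da, pause_da, ETH_ALEN) == 0;
    }

    int main(void)
    {
            unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };

            assert(is_pause_frame(da));     /* would be dropped by the parser */
            da[5] = 0x02;
            assert(!is_pause_frame(da));    /* anything else passes through */
            return 0;
    }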
 /* Enable/disable dropping all mac da's */
 static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
 {
@@ -1162,6 +1194,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
        mvpp2_prs_hw_write(priv, &pe);
 
        /* Create dummy entries for drop all and promiscuous modes */
+       mvpp2_prs_drop_fc(priv);
        mvpp2_prs_mac_drop_all_set(priv, 0, false);
        mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
        mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
@@ -1647,8 +1680,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
                                 MVPP2_PRS_RI_L3_PROTO_MASK);
-       /* Skip eth_type + 4 bytes of IPv6 header */
-       mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+       /* Jump to DIP of IPV6 header */
+       mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+                                MVPP2_MAX_L3_ADDR_SIZE,
                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
        /* Set L3 offset */
        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
index e22f6c85d380346312147daf531bf6c3626e9589..4b68dd37473388617efc76295488b753687a1a46 100644 (file)
 #define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
 #define MVPP2_PE_VLAN_DBL              (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
 #define MVPP2_PE_VLAN_NONE             (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
-/* reserved */
+#define MVPP2_PE_FC_DROP               (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
 #define MVPP2_PE_MAC_MC_PROMISCUOUS    (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
 #define MVPP2_PE_MAC_UC_PROMISCUOUS    (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
 #define MVPP2_PE_MAC_NON_PROMISCUOUS   (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
index 7d0f96290943780d5401a40a45e01e438db8494f..1a8f5a039d5025d64d0fc2e09bafd4dfccfdf5d0 100644 (file)
@@ -871,8 +871,10 @@ static int cgx_lmac_init(struct cgx *cgx)
                if (!lmac)
                        return -ENOMEM;
                lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
-               if (!lmac->name)
-                       return -ENOMEM;
+               if (!lmac->name) {
+                       err = -ENOMEM;
+                       goto err_lmac_free;
+               }
                sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
                lmac->lmac_id = i;
                lmac->cgx = cgx;
@@ -883,7 +885,7 @@ static int cgx_lmac_init(struct cgx *cgx)
                                                 CGX_LMAC_FWI + i * 9),
                                   cgx_fwi_event_handler, 0, lmac->name, lmac);
                if (err)
-                       return err;
+                       goto err_irq;
 
                /* Enable interrupt */
                cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
@@ -895,6 +897,12 @@ static int cgx_lmac_init(struct cgx *cgx)
        }
 
        return cgx_lmac_verify_fwi_version(cgx);
+
+err_irq:
+       kfree(lmac->name);
+err_lmac_free:
+       kfree(lmac);
+       return err;
 }
 
 static int cgx_lmac_exit(struct cgx *cgx)
index d298b935717784e5ba3bdf3344a0898e624a5759..6c6b411e78fd87c66da58761a2f55cb765189a8b 100644 (file)
@@ -469,6 +469,9 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;
 
+       if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+               return -EPERM;
+
        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
 
        cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
@@ -485,6 +488,9 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
        int rc = 0, i;
        u64 cfg;
 
+       if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+               return -EPERM;
+
        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
 
        rsp->hdr.rc = rc;
index d29af7b9c695a6da6a5d57519bc5c3cd241f89b5..76177f7c5ec292b02fbfff1876205bf9b5bf1ee7 100644 (file)
@@ -626,6 +626,11 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
        if (!reg_c0)
                return true;
 
+       /* If reg_c0 is not equal to the default flow tag then skb->mark
+        * is not supported and must be reset back to 0.
+        */
+       skb->mark = 0;
+
        priv = netdev_priv(skb->dev);
        esw = priv->mdev->priv.eswitch;
 
index e521254d886ef349a13fae9bbb84ef10333d811b..072363e73f1cec36da973533bee411d6a0cd7ee0 100644 (file)
@@ -118,16 +118,17 @@ struct mlx5_ct_tuple {
        u16 zone;
 };
 
-struct mlx5_ct_shared_counter {
+struct mlx5_ct_counter {
        struct mlx5_fc *counter;
        refcount_t refcount;
+       bool is_shared;
 };
 
 struct mlx5_ct_entry {
        struct rhash_head node;
        struct rhash_head tuple_node;
        struct rhash_head tuple_nat_node;
-       struct mlx5_ct_shared_counter *shared_counter;
+       struct mlx5_ct_counter *counter;
        unsigned long cookie;
        unsigned long restore_cookie;
        struct mlx5_ct_tuple tuple;
@@ -394,13 +395,14 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
 }
 
 static void
-mlx5_tc_ct_shared_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
+mlx5_tc_ct_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
 {
-       if (!refcount_dec_and_test(&entry->shared_counter->refcount))
+       if (entry->counter->is_shared &&
+           !refcount_dec_and_test(&entry->counter->refcount))
                return;
 
-       mlx5_fc_destroy(ct_priv->dev, entry->shared_counter->counter);
-       kfree(entry->shared_counter);
+       mlx5_fc_destroy(ct_priv->dev, entry->counter->counter);
+       kfree(entry->counter);
 }
 
 static void
@@ -699,7 +701,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
        attr->dest_ft = ct_priv->post_ct;
        attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
        attr->outer_match_level = MLX5_MATCH_L4;
-       attr->counter = entry->shared_counter->counter;
+       attr->counter = entry->counter->counter;
        attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
 
        mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
@@ -732,13 +734,34 @@ err_attr:
        return err;
 }
 
-static struct mlx5_ct_shared_counter *
+static struct mlx5_ct_counter *
+mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
+{
+       struct mlx5_ct_counter *counter;
+       int ret;
+
+       counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+       if (!counter)
+               return ERR_PTR(-ENOMEM);
+
+       counter->is_shared = false;
+       counter->counter = mlx5_fc_create(ct_priv->dev, true);
+       if (IS_ERR(counter->counter)) {
+               ct_dbg("Failed to create counter for ct entry");
+               ret = PTR_ERR(counter->counter);
+               kfree(counter);
+               return ERR_PTR(ret);
+       }
+
+       return counter;
+}
+
+static struct mlx5_ct_counter *
 mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
                              struct mlx5_ct_entry *entry)
 {
        struct mlx5_ct_tuple rev_tuple = entry->tuple;
-       struct mlx5_ct_shared_counter *shared_counter;
-       struct mlx5_core_dev *dev = ct_priv->dev;
+       struct mlx5_ct_counter *shared_counter;
        struct mlx5_ct_entry *rev_entry;
        __be16 tmp_port;
        int ret;
@@ -767,25 +790,20 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
        rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
                                           tuples_ht_params);
        if (rev_entry) {
-               if (refcount_inc_not_zero(&rev_entry->shared_counter->refcount)) {
+               if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
                        mutex_unlock(&ct_priv->shared_counter_lock);
-                       return rev_entry->shared_counter;
+                       return rev_entry->counter;
                }
        }
        mutex_unlock(&ct_priv->shared_counter_lock);
 
-       shared_counter = kzalloc(sizeof(*shared_counter), GFP_KERNEL);
-       if (!shared_counter)
-               return ERR_PTR(-ENOMEM);
-
-       shared_counter->counter = mlx5_fc_create(dev, true);
-       if (IS_ERR(shared_counter->counter)) {
-               ct_dbg("Failed to create counter for ct entry");
-               ret = PTR_ERR(shared_counter->counter);
-               kfree(shared_counter);
+       shared_counter = mlx5_tc_ct_counter_create(ct_priv);
+       if (IS_ERR(shared_counter)) {
+               ret = PTR_ERR(shared_counter);
                return ERR_PTR(ret);
        }
 
+       shared_counter->is_shared = true;
        refcount_set(&shared_counter->refcount, 1);
        return shared_counter;
 }
@@ -798,10 +816,13 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
 {
        int err;
 
-       entry->shared_counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
-       if (IS_ERR(entry->shared_counter)) {
-               err = PTR_ERR(entry->shared_counter);
-               ct_dbg("Failed to create counter for ct entry");
+       if (nf_ct_acct_enabled(dev_net(ct_priv->netdev)))
+               entry->counter = mlx5_tc_ct_counter_create(ct_priv);
+       else
+               entry->counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
+
+       if (IS_ERR(entry->counter)) {
+               err = PTR_ERR(entry->counter);
                return err;
        }
 
@@ -820,7 +841,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
 err_nat:
        mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
 err_orig:
-       mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+       mlx5_tc_ct_counter_put(ct_priv, entry);
        return err;
 }
 
@@ -918,7 +939,7 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
        rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
                               tuples_ht_params);
        mutex_unlock(&ct_priv->shared_counter_lock);
-       mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+       mlx5_tc_ct_counter_put(ct_priv, entry);
 
 }
 
@@ -956,7 +977,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
        if (!entry)
                return -ENOENT;
 
-       mlx5_fc_query_cached(entry->shared_counter->counter, &bytes, &packets, &lastuse);
+       mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
        flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
                          FLOW_ACTION_HW_STATS_DELAYED);
 
index 7943eb30b837e55f69d0df97f9b5bc9a1517e77f..4880f2179273071eb3d3571cef53131658ab830c 100644 (file)
@@ -371,6 +371,15 @@ struct mlx5e_swp_spec {
        u8 tun_l4_proto;
 };
 
+static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
+{
+       /* SWP offsets are in 2-bytes words */
+       eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
+       eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
+       eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
+       eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
+}
+
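Software-parser (SWP) offsets count 2-byte words, so inserting a 4-byte VLAN tag shifts each L3/L4 offset by VLAN_HLEN / 2 = 2, which is what the helper above does when a VLAN tag is present and headers are inlined. The arithmetic as a standalone check (the sample offsets are illustrative):

    #include <assert.h>

    #define VLAN_HLEN 4     /* bytes added by the VLAN tag */

    struct swp_offsets { unsigned char outer_l3, outer_l4, inner_l3, inner_l4; };

    static void swp_offsets_add_vlan(struct swp_offsets *s)
    {
            /* SWP offsets are in 2-byte words */
            s->outer_l3 += VLAN_HLEN / 2;
            s->outer_l4 += VLAN_HLEN / 2;
            s->inner_l3 += VLAN_HLEN / 2;
            s->inner_l4 += VLAN_HLEN / 2;
    }

    int main(void)
    {
            /* 14-byte Ethernet header => L3 at word 7 */
            struct swp_offsets s = { .outer_l3 = 7, .outer_l4 = 17 };

            swp_offsets_add_vlan(&s);
            assert(s.outer_l3 == 9);    /* 18-byte tagged header / 2 */
            assert(s.outer_l4 == 19);
            return 0;
    }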
 static inline void
 mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
                   struct mlx5e_swp_spec *swp_spec)
index 899b98aca0d3ff2b614c546bf3e43ec2c199a294..1fae7fab8297e4d2a70f4e58995be88d5de2a1d7 100644 (file)
@@ -51,7 +51,7 @@ static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
 }
 
 static inline void
-mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
 {
        struct mlx5e_swp_spec swp_spec = {};
        unsigned int offset = 0;
@@ -85,6 +85,8 @@ mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
        }
 
        mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+       if (skb_vlan_tag_present(skb) &&  ihs)
+               mlx5e_eseg_swp_offsets_add_vlan(eseg);
 }
 
 #else
@@ -163,7 +165,7 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
 
 static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
                                       struct sk_buff *skb,
-                                      struct mlx5_wqe_eth_seg *eseg)
+                                      struct mlx5_wqe_eth_seg *eseg, u16 ihs)
 {
 #ifdef CONFIG_MLX5_EN_IPSEC
        if (xfrm_offload(skb))
@@ -172,7 +174,7 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
 
 #if IS_ENABLED(CONFIG_GENEVE)
        if (skb->encapsulation)
-               mlx5e_tx_tunnel_accel(skb, eseg);
+               mlx5e_tx_tunnel_accel(skb, eseg, ihs);
 #endif
 
        return true;
index d9076d543104f1d3163e49a55cf7e6ed7d5c1b51..2d37742a888c1514a8506a1adf6a87ca0b6ec8f3 100644 (file)
@@ -1010,6 +1010,22 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
        return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
 }
 
+static int mlx5e_speed_validate(struct net_device *netdev, bool ext,
+                               const unsigned long link_modes, u8 autoneg)
+{
+       /* Extended link-mode has no speed limitations. */
+       if (ext)
+               return 0;
+
+       if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
+           autoneg != AUTONEG_ENABLE) {
+               netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n",
+                          __func__);
+               return -EINVAL;
+       }
+       return 0;
+}
+
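Extracting the check into mlx5e_speed_validate() keeps the rule in one place: legacy (non-extended) 56G link modes only work with autonegotiation, while extended link modes carry no such restriction. A small model of the helper's logic (the bit position is illustrative):

    #include <assert.h>
    #include <errno.h>

    #define MODE_56GBASE_R4 (1ul << 3)  /* illustrative bit position */

    static int speed_validate(int ext, unsigned long modes, int autoneg)
    {
            if (ext)
                    return 0;           /* extended modes: no restriction */
            if ((modes & MODE_56GBASE_R4) && !autoneg)
                    return -EINVAL;     /* 56G requires autoneg */
            return 0;
    }

    int main(void)
    {
            assert(speed_validate(0, MODE_56GBASE_R4, 0) == -EINVAL);
            assert(speed_validate(0, MODE_56GBASE_R4, 1) == 0);
            assert(speed_validate(1, MODE_56GBASE_R4, 0) == 0);
            return 0;
    }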
 static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
 {
        u32 i, ptys_modes = 0;
@@ -1103,13 +1119,9 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
        link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
                mlx5e_port_speed2linkmodes(mdev, speed, !ext);
 
-       if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
-           autoneg != AUTONEG_ENABLE) {
-               netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
-                          __func__);
-               err = -EINVAL;
+       err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
+       if (err)
                goto out;
-       }
 
        link_modes = link_modes & eproto.cap;
        if (!link_modes) {
index fa8149f6eb088ae86e8d67f9527ae6f714c78741..e02e5895703d5a26af3b38618b15315d0da075fe 100644 (file)
@@ -942,6 +942,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                kfree(ft->g);
+               ft->g = NULL;
                return -ENOMEM;
        }
 
@@ -1087,6 +1088,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                kfree(ft->g);
+               ft->g = NULL;
                return -ENOMEM;
        }
 
@@ -1390,6 +1392,7 @@ err_destroy_groups:
        ft->g[ft->num_groups] = NULL;
        mlx5e_destroy_groups(ft);
        kvfree(in);
+       kfree(ft->g);
 
        return err;
 }
index 7a79d330c0751f6e6be3c88c483326274f84f993..6a852b4901aa0a742989d9882b37f21f07108750 100644 (file)
@@ -3161,7 +3161,8 @@ static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
 
        mlx5_set_port_admin_status(mdev, state);
 
-       if (mlx5_eswitch_mode(mdev) != MLX5_ESWITCH_LEGACY)
+       if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS ||
+           !MLX5_CAP_GEN(mdev, uplink_follow))
                return;
 
        if (state == MLX5_PORT_UP)
index e47e2a0059d0aebcec094192a5dfac9cae4b1a14..61ed671fe741bd48b22d326dd36938fc99aac76d 100644 (file)
@@ -682,9 +682,9 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
 
 static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
                                   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
-                                  struct mlx5_wqe_eth_seg *eseg)
+                                  struct mlx5_wqe_eth_seg *eseg, u16 ihs)
 {
-       if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
+       if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs)))
                return false;
 
        mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
@@ -714,7 +714,8 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
                if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
                        struct mlx5_wqe_eth_seg eseg = {};
 
-                       if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg)))
+                       if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg,
+                                                            attr.ihs)))
                                return NETDEV_TX_OK;
 
                        mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
@@ -731,7 +732,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
        /* May update the WQE, but may not post other WQEs. */
        mlx5e_accel_tx_finish(sq, wqe, &accel,
                              (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
-       if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth)))
+       if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs)))
                return NETDEV_TX_OK;
 
        mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
index 2b85d4777303a5c47a15664a9684d18c72630cfd..3e19b1721303f53284379db71ec81fb389634bc5 100644 (file)
@@ -95,22 +95,21 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
                return 0;
        }
 
-       if (!IS_ERR_OR_NULL(vport->egress.acl))
-               return 0;
-
-       vport->egress.acl = esw_acl_table_create(esw, vport->vport,
-                                                MLX5_FLOW_NAMESPACE_ESW_EGRESS,
-                                                table_size);
-       if (IS_ERR(vport->egress.acl)) {
-               err = PTR_ERR(vport->egress.acl);
-               vport->egress.acl = NULL;
-               goto out;
+       if (!vport->egress.acl) {
+               vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+                                                        MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+                                                        table_size);
+               if (IS_ERR(vport->egress.acl)) {
+                       err = PTR_ERR(vport->egress.acl);
+                       vport->egress.acl = NULL;
+                       goto out;
+               }
+
+               err = esw_acl_egress_lgcy_groups_create(esw, vport);
+               if (err)
+                       goto out;
        }
 
-       err = esw_acl_egress_lgcy_groups_create(esw, vport);
-       if (err)
-               goto out;
-
        esw_debug(esw->dev,
                  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
                  vport->vport, vport->info.vlan, vport->info.qos);
index f3d45ef082cde227c143d147c68dea2b9b5d4d43..83a05371e2aa14698053acb668495603d6b583af 100644 (file)
@@ -564,7 +564,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
        struct mlx5_core_dev *tmp_dev;
        int i, err;
 
-       if (!MLX5_CAP_GEN(dev, vport_group_manager))
+       if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+           !MLX5_CAP_GEN(dev, lag_master) ||
+           MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
                return;
 
        tmp_dev = mlx5_get_next_phys_dev(dev);
@@ -582,12 +584,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
        if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
                return;
 
-       for (i = 0; i < MLX5_MAX_PORTS; i++) {
-               tmp_dev = ldev->pf[i].dev;
-               if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
-                   MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
+       for (i = 0; i < MLX5_MAX_PORTS; i++)
+               if (!ldev->pf[i].dev)
                        break;
-       }
 
        if (i >= MLX5_MAX_PORTS)
                ldev->flags |= MLX5_LAG_FLAG_READY;
index c08315b51fd3aa4e2a259d424ff883efc0901f56..ca6f2fc39ea0af55173bc01708d363b3266e6f8b 100644 (file)
@@ -1368,8 +1368,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
                         MLX5_COREDEV_VF : MLX5_COREDEV_PF;
 
        dev->priv.adev_idx = mlx5_adev_idx_alloc();
-       if (dev->priv.adev_idx < 0)
-               return dev->priv.adev_idx;
+       if (dev->priv.adev_idx < 0) {
+               err = dev->priv.adev_idx;
+               goto adev_init_err;
+       }
 
        err = mlx5_mdev_init(dev, prof_sel);
        if (err)
@@ -1403,6 +1405,7 @@ pci_init_err:
        mlx5_mdev_uninit(dev);
 mdev_init_err:
        mlx5_adev_idx_free(dev->priv.adev_idx);
+adev_init_err:
        mlx5_devlink_free(devlink);
 
        return err;
index 0fc7de4aa572fbd60194aeb5d39f15e1b25efb68..8e0dddc6383f0706a1c3cc5b8693316851a406c3 100644 (file)
@@ -116,7 +116,7 @@ free:
 static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
 {
        mlx5_core_roce_gid_set(dev, 0, 0, 0,
-                              NULL, NULL, false, 0, 0);
+                              NULL, NULL, false, 0, 1);
 }
 
 static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
index 8fa286ccdd6bb281a834bc135593487878930403..bf85ce9835d7f93728e618f7bddbab1d41c37503 100644 (file)
@@ -19,7 +19,7 @@
 #define MLXSW_THERMAL_ASIC_TEMP_NORM   75000   /* 75C */
 #define MLXSW_THERMAL_ASIC_TEMP_HIGH   85000   /* 85C */
 #define MLXSW_THERMAL_ASIC_TEMP_HOT    105000  /* 105C */
-#define MLXSW_THERMAL_ASIC_TEMP_CRIT   110000  /* 110C */
+#define MLXSW_THERMAL_ASIC_TEMP_CRIT   140000  /* 140C */
 #define MLXSW_THERMAL_HYSTERESIS_TEMP  5000    /* 5C */
 #define MLXSW_THERMAL_MODULE_TEMP_SHIFT        (MLXSW_THERMAL_HYSTERESIS_TEMP * 2)
 #define MLXSW_THERMAL_ZONE_MAX_NAME    16
@@ -176,6 +176,12 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
        if (err)
                return err;
 
+       if (crit_temp > emerg_temp) {
+               dev_warn(dev, "%s : Critical threshold %d is above emergency threshold %d\n",
+                        tz->tzdev->type, crit_temp, emerg_temp);
+               return 0;
+       }
+
        /* According to the system thermal requirements, the thermal zones are
         * defined with four trip points. The critical and emergency
         * temperature thresholds, provided by QSFP module are set as "active"
@@ -190,11 +196,8 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
                tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp;
        tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp;
        tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp;
-       if (emerg_temp > crit_temp)
-               tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp +
+       tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp +
                                        MLXSW_THERMAL_MODULE_TEMP_SHIFT;
-       else
-               tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp;
 
        return 0;
 }
index 0b9992bd66262935f2c00adefdfe52f6cdd7d603..ff87a0bc089cffc6cbe71c34ea88d86132ae1d53 100644 (file)
@@ -60,14 +60,27 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
                      const unsigned char mac[ETH_ALEN],
                      unsigned int vid, enum macaccess_entry_type type)
 {
+       u32 cmd = ANA_TABLES_MACACCESS_VALID |
+               ANA_TABLES_MACACCESS_DEST_IDX(port) |
+               ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
+               ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
+       unsigned int mc_ports;
+
+       /* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
+       if (type == ENTRYTYPE_MACv4)
+               mc_ports = (mac[1] << 8) | mac[2];
+       else if (type == ENTRYTYPE_MACv6)
+               mc_ports = (mac[0] << 8) | mac[1];
+       else
+               mc_ports = 0;
+
+       if (mc_ports & BIT(ocelot->num_phys_ports))
+               cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
+
        ocelot_mact_select(ocelot, mac, vid);
 
        /* Issue a write command */
-       ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID |
-                            ANA_TABLES_MACACCESS_DEST_IDX(port) |
-                            ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
-                            ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN),
-                            ANA_TABLES_MACACCESS);
+       ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);
 
        return ocelot_mact_wait_for_completion(ocelot);
 }
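For IPv4/IPv6 multicast entries in the Ocelot MAC table, two bytes of the MAC address field are repurposed as a destination-port bitmask; the change above inspects that mask and sets MAC_CPU_COPY when the CPU port bit (bit num_phys_ports) is included. A model of the MACv4 port-mask extraction (the port count is illustrative):

    #include <assert.h>

    #define NUM_PHYS_PORTS 11   /* illustrative; the CPU port is the next bit */

    /* MACv4 entries carry the port mask in MAC bytes 1 and 2 */
    static unsigned int mc_ports_v4(const unsigned char mac[6])
    {
            return (mac[1] << 8) | mac[2];
    }

    int main(void)
    {
            /* 0x08 << 8 = bit 11 set: the CPU port on this switch */
            unsigned char mac[6] = { 0x01, 0x08, 0x00, 0x00, 0x00, 0x01 };

            assert(mc_ports_v4(mac) & (1u << NUM_PHYS_PORTS));
            return 0;
    }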
index 2bd2840d88bdc89835f08a56fd7c35581492162c..42230f92ca9c8a142f2212a93b98a3190cbaa6de 100644 (file)
@@ -1042,10 +1042,8 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        int ret = 0;
 
-       if (!ocelot_netdevice_dev_check(dev))
-               return 0;
-
        if (event == NETDEV_PRECHANGEUPPER &&
+           ocelot_netdevice_dev_check(dev) &&
            netif_is_lag_master(info->upper_dev)) {
                struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
                struct netlink_ext_ack *extack;
index 776b7d264dc34e83704f116d7d24bb33bd7ff795..2289e1fe37419a2ac447919b55e90ab0a14e6a85 100644 (file)
@@ -506,10 +506,14 @@ static int mac_sonic_platform_probe(struct platform_device *pdev)
 
        err = register_netdev(dev);
        if (err)
-               goto out;
+               goto undo_probe;
 
        return 0;
 
+undo_probe:
+       dma_free_coherent(lp->device,
+                         SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+                         lp->descriptors, lp->descriptors_laddr);
 out:
        free_netdev(dev);
 
@@ -584,12 +588,16 @@ static int mac_sonic_nubus_probe(struct nubus_board *board)
 
        err = register_netdev(ndev);
        if (err)
-               goto out;
+               goto undo_probe;
 
        nubus_set_drvdata(board, ndev);
 
        return 0;
 
+undo_probe:
+       dma_free_coherent(lp->device,
+                         SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+                         lp->descriptors, lp->descriptors_laddr);
 out:
        free_netdev(ndev);
        return err;
index afa166ff7aef5ed1479ba657e0c51737ce6c89f4..28d9e98db81a8bbd82a4fdd0d19c524a00b95332 100644 (file)
@@ -229,11 +229,14 @@ int xtsonic_probe(struct platform_device *pdev)
        sonic_msg_init(dev);
 
        if ((err = register_netdev(dev)))
-               goto out1;
+               goto undo_probe1;
 
        return 0;
 
-out1:
+undo_probe1:
+       dma_free_coherent(lp->device,
+                         SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+                         lp->descriptors, lp->descriptors_laddr);
        release_region(dev->base_addr, SONIC_MEM_SIZE);
 out:
        free_netdev(dev);
index 9156c9825a16fae8467cd153fb592b367204f902..ac4cd5d82e696b91320a8c90756cce871c5e13e2 100644 (file)
@@ -337,7 +337,7 @@ void ionic_rx_fill(struct ionic_queue *q)
        unsigned int i, j;
        unsigned int len;
 
-       len = netdev->mtu + ETH_HLEN;
+       len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
        nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;
 
        for (i = ionic_q_space_avail(q); i; i--) {
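Adding VLAN_HLEN to the fill length ensures receive buffers can hold a tagged maximum-size frame; nfrags is then that length rounded up to whole pages. The sizing arithmetic as a standalone check (the MTU is an example value):

    #include <assert.h>

    #define ETH_HLEN  14
    #define VLAN_HLEN 4
    #define PAGE_SIZE 4096

    int main(void)
    {
            unsigned int mtu = 9000;                        /* jumbo frame */
            unsigned int len = mtu + ETH_HLEN + VLAN_HLEN;  /* 9018 bytes */
            unsigned int nfrags = (len + PAGE_SIZE - 1) / PAGE_SIZE;

            assert(nfrags == 3);    /* 9018 bytes spans three 4 KiB pages */
            return 0;
    }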
index 4366c7a8de9515c0809b255613d57315e972e37f..6b5ddb07ee8331ae1038a1d62650233357be180d 100644 (file)
@@ -78,6 +78,7 @@ config QED
        depends on PCI
        select ZLIB_INFLATE
        select CRC8
+       select CRC32
        select NET_DEVLINK
        help
          This enables the support for Marvell FastLinQ adapters family.
index f21847739ef1fecbd37eacb588717304574ca2eb..d258e0ccf9465309eef1b3062cdf2cce8025a565 100644 (file)
@@ -564,11 +564,6 @@ static const struct net_device_ops netxen_netdev_ops = {
        .ndo_set_features = netxen_set_features,
 };
 
-static inline bool netxen_function_zero(struct pci_dev *pdev)
-{
-       return (PCI_FUNC(pdev->devfn) == 0) ? true : false;
-}
-
 static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter,
                                             u32 mode)
 {
@@ -664,7 +659,7 @@ static int netxen_setup_intr(struct netxen_adapter *adapter)
        netxen_initialize_interrupt_registers(adapter);
        netxen_set_msix_bit(pdev, 0);
 
-       if (netxen_function_zero(pdev)) {
+       if (adapter->portnum == 0) {
                if (!netxen_setup_msi_interrupts(adapter, num_msix))
                        netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE);
                else
index a2494bf8500798f96eb3b854c95299bf58bf770a..ca0ee29a57b50ac4fd15d19be46ab6de0766a311 100644 (file)
@@ -1799,6 +1799,11 @@ netdev_features_t qede_features_check(struct sk_buff *skb,
                              ntohs(udp_hdr(skb)->dest) != gnv_port))
                                return features & ~(NETIF_F_CSUM_MASK |
                                                    NETIF_F_GSO_MASK);
+               } else if (l4_proto == IPPROTO_IPIP) {
+                       /* IPIP tunnels are unknown to the device, or at least not supported
+                        * natively; their offloads can't be done trivially, so disable them
+                        * for such skbs.
+                        */
+                       return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
                }
        }
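
The added branch follows the usual .ndo_features_check contract: for a packet the hardware cannot offload, return the feature set with the checksum and GSO bits withdrawn so the stack falls back to software. A hedged sketch of that contract with illustrative names (example_features_check; the tunnel test is simplified relative to the driver's full header parsing):

static netdev_features_t example_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	/* Encapsulated IPIP: no native offload, so strip csum/GSO bits. */
	if (skb->encapsulation && ip_hdr(skb)->protocol == IPPROTO_IPIP)
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}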
 
index 46d8510b2fe268b3641805962078743824194550..a569abe7f5ef2e4f2ba1e578908987362d8a3f56 100644 (file)
@@ -2207,7 +2207,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
        }
 
        switch (tp->mac_version) {
-       case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
+       case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+       case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
        case RTL_GIGA_MAC_VER_37:
        case RTL_GIGA_MAC_VER_39:
        case RTL_GIGA_MAC_VER_43:
@@ -2233,7 +2234,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
 static void rtl_pll_power_up(struct rtl8169_private *tp)
 {
        switch (tp->mac_version) {
-       case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
+       case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+       case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
        case RTL_GIGA_MAC_VER_37:
        case RTL_GIGA_MAC_VER_39:
        case RTL_GIGA_MAC_VER_43:
index c633046329352601ce76ad7d12913fffd420e9ec..590b088bc4c7f3e2f0d6d79443fcff6a6f4da077 100644 (file)
@@ -2606,10 +2606,10 @@ static int sh_eth_close(struct net_device *ndev)
        /* Free all the skbuffs in the Rx queue and the DMA buffer. */
        sh_eth_ring_free(ndev);
 
-       pm_runtime_put_sync(&mdp->pdev->dev);
-
        mdp->is_opened = 0;
 
+       pm_runtime_put(&mdp->pdev->dev);
+
        return 0;
 }
 
@@ -3034,6 +3034,28 @@ static int sh_mdio_release(struct sh_eth_private *mdp)
        return 0;
 }
 
+static int sh_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+{
+       int res;
+
+       pm_runtime_get_sync(bus->parent);
+       res = mdiobb_read(bus, phy, reg);
+       pm_runtime_put(bus->parent);
+
+       return res;
+}
+
+static int sh_mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+       int res;
+
+       pm_runtime_get_sync(bus->parent);
+       res = mdiobb_write(bus, phy, reg, val);
+       pm_runtime_put(bus->parent);
+
+       return res;
+}
+
 /* MDIO bus init function */
 static int sh_mdio_init(struct sh_eth_private *mdp,
                        struct sh_eth_plat_data *pd)
@@ -3058,6 +3080,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
        if (!mdp->mii_bus)
                return -ENOMEM;
 
+       /* Wrap accessors with Runtime PM-aware ops */
+       mdp->mii_bus->read = sh_mdiobb_read;
+       mdp->mii_bus->write = sh_mdiobb_write;
+
        /* Hook up MII support for ethtool */
        mdp->mii_bus->name = "sh_mii";
        mdp->mii_bus->parent = dev;
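
The wrappers above exist because the MAC's MDIO registers are only accessible while the device is runtime-resumed, and phylib may touch the bus while the interface is closed. A sketch of the wrap-and-delegate shape (names assumed; bus->parent is taken to be the device that owns the registers):

static int pm_aware_mdio_read(struct mii_bus *bus, int phy, int reg)
{
	int res;

	pm_runtime_get_sync(bus->parent);	/* make sure the block is clocked */
	res = mdiobb_read(bus, phy, reg);	/* delegate to the bitbang core */
	pm_runtime_put(bus->parent);		/* balance the reference */

	return res;
}

	/* Install the PM-aware accessors before registering the bus so
	 * every phylib access goes through them.
	 */
	bus->read = pm_aware_mdio_read;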
index a2e80c89de2d4b2995cc75a246f18ceb214e4c0e..9a6a519426a08ade395e09acdd46fc8b83bcdb19 100644 (file)
@@ -721,6 +721,8 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G_ID                0x4bb0
 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G_ID                0x4bb1
 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5_ID       0x4bb2
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0_ID          0x43ac
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1_ID          0x43a2
 #define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID             0xa0ac
 
 static const struct pci_device_id intel_eth_pci_id_table[] = {
@@ -735,6 +737,8 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
        { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) },
        { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) },
        { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_info) },
+       { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_info) },
+       { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_info) },
        {}
 };
 MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
index 459ae715b33d1e38995436fc652670e2a5661921..f184b00f51166206ade2457f611b9b4a32b349d0 100644 (file)
@@ -135,7 +135,7 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
        struct device *dev = dwmac->dev;
        static const struct clk_parent_data mux_parents[] = {
                { .fw_name = "clkin0", },
-               { .fw_name = "clkin1", },
+               { .index = -1, },
        };
        static const struct clk_div_table div_table[] = {
                { .div = 2, .val = 2, },
index 58e0511badba8ab760c086b8c1c8736745add035..a5e0eff4a38741153bd92dfef2737c12114928a4 100644 (file)
@@ -64,6 +64,7 @@ struct emac_variant {
  * @variant:   reference to the current board variant
  * @regmap:    regmap for using the syscon
  * @internal_phy_powered: Is the internal PHY powered
+ * @use_internal_phy: Is the internal PHY selected for use
  * @mux_handle:        Internal pointer used by mdio-mux lib
  */
 struct sunxi_priv_data {
@@ -74,6 +75,7 @@ struct sunxi_priv_data {
        const struct emac_variant *variant;
        struct regmap_field *regmap_field;
        bool internal_phy_powered;
+       bool use_internal_phy;
        void *mux_handle;
 };
 
@@ -539,8 +541,11 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
        .dma_interrupt = sun8i_dwmac_dma_interrupt,
 };
 
+static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv);
+
 static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
 {
+       struct net_device *ndev = platform_get_drvdata(pdev);
        struct sunxi_priv_data *gmac = priv;
        int ret;
 
@@ -554,13 +559,25 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
 
        ret = clk_prepare_enable(gmac->tx_clk);
        if (ret) {
-               if (gmac->regulator)
-                       regulator_disable(gmac->regulator);
                dev_err(&pdev->dev, "Could not enable AHB clock\n");
-               return ret;
+               goto err_disable_regulator;
+       }
+
+       if (gmac->use_internal_phy) {
+               ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev));
+               if (ret)
+                       goto err_disable_clk;
        }
 
        return 0;
+
+err_disable_clk:
+       clk_disable_unprepare(gmac->tx_clk);
+err_disable_regulator:
+       if (gmac->regulator)
+               regulator_disable(gmac->regulator);
+
+       return ret;
 }
 
 static void sun8i_dwmac_core_init(struct mac_device_info *hw,
@@ -831,7 +848,6 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
        struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
        u32 reg, val;
        int ret = 0;
-       bool need_power_ephy = false;
 
        if (current_child ^ desired_child) {
                regmap_field_read(gmac->regmap_field, &reg);
@@ -839,13 +855,12 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
                case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID:
                        dev_info(priv->device, "Switch mux to internal PHY");
                        val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT;
-
-                       need_power_ephy = true;
+                       gmac->use_internal_phy = true;
                        break;
                case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID:
                        dev_info(priv->device, "Switch mux to external PHY");
                        val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN;
-                       need_power_ephy = false;
+                       gmac->use_internal_phy = false;
                        break;
                default:
                        dev_err(priv->device, "Invalid child ID %x\n",
@@ -853,7 +868,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
                        return -EINVAL;
                }
                regmap_field_write(gmac->regmap_field, val);
-               if (need_power_ephy) {
+               if (gmac->use_internal_phy) {
                        ret = sun8i_dwmac_power_internal_phy(priv);
                        if (ret)
                                return ret;
@@ -883,22 +898,23 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
        return ret;
 }
 
-static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
+static int sun8i_dwmac_set_syscon(struct device *dev,
+                                 struct plat_stmmacenet_data *plat)
 {
-       struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
-       struct device_node *node = priv->device->of_node;
+       struct sunxi_priv_data *gmac = plat->bsp_priv;
+       struct device_node *node = dev->of_node;
        int ret;
        u32 reg, val;
 
        ret = regmap_field_read(gmac->regmap_field, &val);
        if (ret) {
-               dev_err(priv->device, "Fail to read from regmap field.\n");
+               dev_err(dev, "Failed to read from regmap field.\n");
                return ret;
        }
 
        reg = gmac->variant->default_syscon_value;
        if (reg != val)
-               dev_warn(priv->device,
+               dev_warn(dev,
                         "Current syscon value is not the default %x (expect %x)\n",
                         val, reg);
 
@@ -911,9 +927,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
                /* Force EPHY xtal frequency to 24MHz. */
                reg |= H3_EPHY_CLK_SEL;
 
-               ret = of_mdio_parse_addr(priv->device, priv->plat->phy_node);
+               ret = of_mdio_parse_addr(dev, plat->phy_node);
                if (ret < 0) {
-                       dev_err(priv->device, "Could not parse MDIO addr\n");
+                       dev_err(dev, "Could not parse MDIO addr\n");
                        return ret;
                }
                /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
@@ -929,17 +945,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
 
        if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
                if (val % 100) {
-                       dev_err(priv->device, "tx-delay must be a multiple of 100\n");
+                       dev_err(dev, "tx-delay must be a multiple of 100\n");
                        return -EINVAL;
                }
                val /= 100;
-               dev_dbg(priv->device, "set tx-delay to %x\n", val);
+               dev_dbg(dev, "set tx-delay to %x\n", val);
                if (val <= gmac->variant->tx_delay_max) {
                        reg &= ~(gmac->variant->tx_delay_max <<
                                 SYSCON_ETXDC_SHIFT);
                        reg |= (val << SYSCON_ETXDC_SHIFT);
                } else {
-                       dev_err(priv->device, "Invalid TX clock delay: %d\n",
+                       dev_err(dev, "Invalid TX clock delay: %d\n",
                                val);
                        return -EINVAL;
                }
@@ -947,17 +963,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
 
        if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) {
                if (val % 100) {
-                       dev_err(priv->device, "rx-delay must be a multiple of 100\n");
+                       dev_err(dev, "rx-delay must be a multiple of 100\n");
                        return -EINVAL;
                }
                val /= 100;
-               dev_dbg(priv->device, "set rx-delay to %x\n", val);
+               dev_dbg(dev, "set rx-delay to %x\n", val);
                if (val <= gmac->variant->rx_delay_max) {
                        reg &= ~(gmac->variant->rx_delay_max <<
                                 SYSCON_ERXDC_SHIFT);
                        reg |= (val << SYSCON_ERXDC_SHIFT);
                } else {
-                       dev_err(priv->device, "Invalid RX clock delay: %d\n",
+                       dev_err(dev, "Invalid RX clock delay: %d\n",
                                val);
                        return -EINVAL;
                }
@@ -968,7 +984,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
        if (gmac->variant->support_rmii)
                reg &= ~SYSCON_RMII_EN;
 
-       switch (priv->plat->interface) {
+       switch (plat->interface) {
        case PHY_INTERFACE_MODE_MII:
                /* default */
                break;
@@ -982,8 +998,8 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
                reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII;
                break;
        default:
-               dev_err(priv->device, "Unsupported interface mode: %s",
-                       phy_modes(priv->plat->interface));
+               dev_err(dev, "Unsupported interface mode: %s",
+                       phy_modes(plat->interface));
                return -EINVAL;
        }
 
@@ -1004,17 +1020,10 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
        struct sunxi_priv_data *gmac = priv;
 
        if (gmac->variant->soc_has_internal_phy) {
-               /* sun8i_dwmac_exit could be called with mdiomux uninit */
-               if (gmac->mux_handle)
-                       mdio_mux_uninit(gmac->mux_handle);
                if (gmac->internal_phy_powered)
                        sun8i_dwmac_unpower_internal_phy(gmac);
        }
 
-       sun8i_dwmac_unset_syscon(gmac);
-
-       reset_control_put(gmac->rst_ephy);
-
        clk_disable_unprepare(gmac->tx_clk);
 
        if (gmac->regulator)
@@ -1049,16 +1058,11 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
 {
        struct mac_device_info *mac;
        struct stmmac_priv *priv = ppriv;
-       int ret;
 
        mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
        if (!mac)
                return NULL;
 
-       ret = sun8i_dwmac_set_syscon(priv);
-       if (ret)
-               return NULL;
-
        mac->pcsr = priv->ioaddr;
        mac->mac = &sun8i_dwmac_ops;
        mac->dma = &sun8i_dwmac_dma_ops;
@@ -1134,10 +1138,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
-       if (IS_ERR(plat_dat))
-               return PTR_ERR(plat_dat);
-
        gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
        if (!gmac)
                return -ENOMEM;
@@ -1201,11 +1201,15 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
        ret = of_get_phy_mode(dev->of_node, &interface);
        if (ret)
                return -EINVAL;
-       plat_dat->interface = interface;
+
+       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+       if (IS_ERR(plat_dat))
+               return PTR_ERR(plat_dat);
 
        /* platform data specifying hardware features and callbacks.
         * hardware features were copied from Allwinner drivers.
         */
+       plat_dat->interface = interface;
        plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
        plat_dat->tx_coe = 1;
        plat_dat->has_sun8i = true;
@@ -1214,9 +1218,13 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
        plat_dat->exit = sun8i_dwmac_exit;
        plat_dat->setup = sun8i_dwmac_setup;
 
+       ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat);
+       if (ret)
+               goto dwmac_deconfig;
+
        ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
        if (ret)
-               return ret;
+               goto dwmac_syscon;
 
        ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
        if (ret)
@@ -1230,7 +1238,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
        if (gmac->variant->soc_has_internal_phy) {
                ret = get_ephy_nodes(priv);
                if (ret)
-                       goto dwmac_exit;
+                       goto dwmac_remove;
                ret = sun8i_dwmac_register_mdio_mux(priv);
                if (ret) {
                        dev_err(&pdev->dev, "Failed to register mux\n");
@@ -1239,15 +1247,42 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
        } else {
                ret = sun8i_dwmac_reset(priv);
                if (ret)
-                       goto dwmac_exit;
+                       goto dwmac_remove;
        }
 
        return ret;
 dwmac_mux:
-       sun8i_dwmac_unset_syscon(gmac);
+       reset_control_put(gmac->rst_ephy);
+       clk_put(gmac->ephy_clk);
+dwmac_remove:
+       stmmac_dvr_remove(&pdev->dev);
 dwmac_exit:
+       sun8i_dwmac_exit(pdev, gmac);
+dwmac_syscon:
+       sun8i_dwmac_unset_syscon(gmac);
+dwmac_deconfig:
+       stmmac_remove_config_dt(pdev, plat_dat);
+
+       return ret;
+}
+
+static int sun8i_dwmac_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+       struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+
+       if (gmac->variant->soc_has_internal_phy) {
+               mdio_mux_uninit(gmac->mux_handle);
+               sun8i_dwmac_unpower_internal_phy(gmac);
+               reset_control_put(gmac->rst_ephy);
+               clk_put(gmac->ephy_clk);
+       }
+
        stmmac_pltfr_remove(pdev);
-return ret;
+       sun8i_dwmac_unset_syscon(gmac);
+
+       return 0;
 }
 
 static const struct of_device_id sun8i_dwmac_match[] = {
@@ -1269,7 +1304,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
 
 static struct platform_driver sun8i_dwmac_driver = {
        .probe  = sun8i_dwmac_probe,
-       .remove = stmmac_pltfr_remove,
+       .remove = sun8i_dwmac_remove,
        .driver = {
                .name           = "dwmac-sun8i",
                .pm             = &stmmac_pltfr_pm_ops,
index 03e79a677c8bd015f00434635f19217624f0ff85..8f7ac24545efe2659c4a7ea6acb4deb2bf74c9fd 100644 (file)
@@ -568,68 +568,24 @@ static int dwmac5_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
 int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
                         unsigned int ptp_rate)
 {
-       u32 speed, total_offset, offset, ctrl, ctr_low;
-       u32 extcfg = readl(ioaddr + GMAC_EXT_CONFIG);
-       u32 mac_cfg = readl(ioaddr + GMAC_CONFIG);
        int i, ret = 0x0;
-       u64 total_ctr;
-
-       if (extcfg & GMAC_CONFIG_EIPG_EN) {
-               offset = (extcfg & GMAC_CONFIG_EIPG) >> GMAC_CONFIG_EIPG_SHIFT;
-               offset = 104 + (offset * 8);
-       } else {
-               offset = (mac_cfg & GMAC_CONFIG_IPG) >> GMAC_CONFIG_IPG_SHIFT;
-               offset = 96 - (offset * 8);
-       }
-
-       speed = mac_cfg & (GMAC_CONFIG_PS | GMAC_CONFIG_FES);
-       speed = speed >> GMAC_CONFIG_FES_SHIFT;
-
-       switch (speed) {
-       case 0x0:
-               offset = offset * 1000; /* 1G */
-               break;
-       case 0x1:
-               offset = offset * 400; /* 2.5G */
-               break;
-       case 0x2:
-               offset = offset * 100000; /* 10M */
-               break;
-       case 0x3:
-               offset = offset * 10000; /* 100M */
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       offset = offset / 1000;
+       u32 ctrl;
 
        ret |= dwmac5_est_write(ioaddr, BTR_LOW, cfg->btr[0], false);
        ret |= dwmac5_est_write(ioaddr, BTR_HIGH, cfg->btr[1], false);
        ret |= dwmac5_est_write(ioaddr, TER, cfg->ter, false);
        ret |= dwmac5_est_write(ioaddr, LLR, cfg->gcl_size, false);
+       ret |= dwmac5_est_write(ioaddr, CTR_LOW, cfg->ctr[0], false);
+       ret |= dwmac5_est_write(ioaddr, CTR_HIGH, cfg->ctr[1], false);
        if (ret)
                return ret;
 
-       total_offset = 0;
        for (i = 0; i < cfg->gcl_size; i++) {
-               ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i] + offset, true);
+               ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i], true);
                if (ret)
                        return ret;
-
-               total_offset += offset;
        }
 
-       total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000ULL;
-       total_ctr += total_offset;
-
-       ctr_low = do_div(total_ctr, 1000000000);
-
-       ret |= dwmac5_est_write(ioaddr, CTR_LOW, ctr_low, false);
-       ret |= dwmac5_est_write(ioaddr, CTR_HIGH, total_ctr, false);
-       if (ret)
-               return ret;
-
        ctrl = readl(ioaddr + MTL_EST_CONTROL);
        ctrl &= ~PTOV;
        ctrl |= ((1000000000 / ptp_rate) * 6) << PTOV_SHIFT;
index 5b1c12ff98c05f3f56e62ff754d62712cc4be449..26b971cd4da5abfb8407fe9b8bf3aa63c6f87c52 100644 (file)
@@ -2184,7 +2184,7 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
                        spin_lock_irqsave(&ch->lock, flags);
                        stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
                        spin_unlock_irqrestore(&ch->lock, flags);
-                       __napi_schedule_irqoff(&ch->rx_napi);
+                       __napi_schedule(&ch->rx_napi);
                }
        }
 
@@ -2193,7 +2193,7 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
                        spin_lock_irqsave(&ch->lock, flags);
                        stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
                        spin_unlock_irqrestore(&ch->lock, flags);
-                       __napi_schedule_irqoff(&ch->tx_napi);
+                       __napi_schedule(&ch->tx_napi);
                }
        }
 
@@ -4026,6 +4026,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
        int txfifosz = priv->plat->tx_fifo_size;
+       const int mtu = new_mtu;
 
        if (txfifosz == 0)
                txfifosz = priv->dma_cap.tx_fifo_size;
@@ -4043,7 +4044,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
        if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
                return -EINVAL;
 
-       dev->mtu = new_mtu;
+       dev->mtu = mtu;
 
        netdev_update_features(dev);
 
index f5bed4d26e80464bc35f7537ff586439134a37d9..8ed3b2c834a09e44cce6f4810a5484c3cd5d7943 100644 (file)
@@ -599,7 +599,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
 {
        u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
        struct plat_stmmacenet_data *plat = priv->plat;
-       struct timespec64 time;
+       struct timespec64 time, current_time;
+       ktime_t current_time_ns;
        bool fpe = false;
        int i, ret = 0;
        u64 ctr;
@@ -694,7 +695,22 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
        }
 
        /* Adjust for real system time */
-       time = ktime_to_timespec64(qopt->base_time);
+       priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
+       current_time_ns = timespec64_to_ktime(current_time);
+       if (ktime_after(qopt->base_time, current_time_ns)) {
+               time = ktime_to_timespec64(qopt->base_time);
+       } else {
+               ktime_t base_time;
+               s64 n;
+
+               n = div64_s64(ktime_sub_ns(current_time_ns, qopt->base_time),
+                             qopt->cycle_time);
+               base_time = ktime_add_ns(qopt->base_time,
+                                        (n + 1) * qopt->cycle_time);
+
+               time = ktime_to_timespec64(base_time);
+       }
+
        priv->plat->est->btr[0] = (u32)time.tv_nsec;
        priv->plat->est->btr[1] = (u32)time.tv_sec;
 
index d1fc7955d422743065f22a8857dcd9259c100824..43222a34cba069b9bc10750cbd9a4bcdfc8b6228 100644 (file)
@@ -599,6 +599,7 @@ void cpts_unregister(struct cpts *cpts)
 
        ptp_clock_unregister(cpts->clock);
        cpts->clock = NULL;
+       cpts->phc_index = -1;
 
        cpts_write32(cpts, 0, int_enable);
        cpts_write32(cpts, 0, control);
@@ -784,6 +785,7 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
        cpts->cc.read = cpts_systim_read;
        cpts->cc.mask = CLOCKSOURCE_MASK(32);
        cpts->info = cpts_info;
+       cpts->phc_index = -1;
 
        if (n_ext_ts)
                cpts->info.n_ext_ts = n_ext_ts;
index c4795249719d45c34048036bd80030c7918672da..14d9a791924bf9b6b16f53cba222ec6ff20cf2bf 100644 (file)
@@ -326,8 +326,8 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
 }
 
 /* Issue an event ring command and wait for it to complete */
-static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
-                           enum gsi_evt_cmd_opcode opcode)
+static void evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
+                            enum gsi_evt_cmd_opcode opcode)
 {
        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
        struct completion *completion = &evt_ring->completion;
@@ -340,7 +340,13 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
         * is issued here.  Only permit *this* event ring to trigger
         * an interrupt, and only enable the event control IRQ type
         * when we expect it to occur.
+        *
+        * There's a small chance that a previous command completed
+        * after the interrupt was disabled, so make sure we have no
+        * pending interrupts before we enable them.
         */
+       iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+
        val = BIT(evt_ring_id);
        iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
        gsi_irq_type_enable(gsi, GSI_EV_CTRL);
@@ -355,19 +361,16 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
 
        if (success)
-               return 0;
+               return;
 
        dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
                opcode, evt_ring_id, evt_ring->state);
-
-       return -ETIMEDOUT;
 }
 
 /* Allocate an event ring in NOT_ALLOCATED state */
 static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
 {
        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
-       int ret;
 
        /* Get initial event ring state */
        evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
@@ -377,14 +380,16 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
                return -EINVAL;
        }
 
-       ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
-       if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
-               dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
-                       evt_ring_id, evt_ring->state);
-               ret = -EIO;
-       }
+       evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
 
-       return ret;
+       /* If successful the event ring state will have changed */
+       if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+               return 0;
+
+       dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
+               evt_ring_id, evt_ring->state);
+
+       return -EIO;
 }
 
 /* Reset a GSI event ring in ALLOCATED or ERROR state. */
@@ -392,7 +397,6 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
 {
        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
        enum gsi_evt_ring_state state = evt_ring->state;
-       int ret;
 
        if (state != GSI_EVT_RING_STATE_ALLOCATED &&
            state != GSI_EVT_RING_STATE_ERROR) {
@@ -401,17 +405,20 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
                return;
        }
 
-       ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
-       if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
-               dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
-                       evt_ring_id, evt_ring->state);
+       evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
+
+       /* If successful the event ring state will have changed */
+       if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+               return;
+
+       dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
+               evt_ring_id, evt_ring->state);
 }
 
 /* Issue a hardware de-allocation request for an allocated event ring */
 static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
 {
        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
-       int ret;
 
        if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
                dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
@@ -419,10 +426,14 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
                return;
        }
 
-       ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
-       if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
-               dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
-                       evt_ring_id, evt_ring->state);
+       evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
+
+       /* If successful the event ring state will have changed */
+       if (evt_ring->state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
+               return;
+
+       dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
+               evt_ring_id, evt_ring->state);
 }
 
 /* Fetch the current state of a channel from hardware */
@@ -438,7 +449,7 @@ static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
 }
 
 /* Issue a channel command and wait for it to complete */
-static int
+static void
 gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
 {
        struct completion *completion = &channel->completion;
@@ -453,7 +464,13 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
         * issued here.  So we only permit *this* channel to trigger
         * an interrupt and only enable the channel control IRQ type
         * when we expect it to occur.
+        *
+        * There's a small chance that a previous command completed
+        * after the interrupt was disabled, so make sure we have no
+        * pending interrupts before we enable them.
         */
+       iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+
        val = BIT(channel_id);
        iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
        gsi_irq_type_enable(gsi, GSI_CH_CTRL);
@@ -467,12 +484,10 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
 
        if (success)
-               return 0;
+               return;
 
        dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
                opcode, channel_id, gsi_channel_state(channel));
-
-       return -ETIMEDOUT;
 }
 
 /* Allocate GSI channel in NOT_ALLOCATED state */
@@ -481,7 +496,6 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
        struct gsi_channel *channel = &gsi->channel[channel_id];
        struct device *dev = gsi->dev;
        enum gsi_channel_state state;
-       int ret;
 
        /* Get initial channel state */
        state = gsi_channel_state(channel);
@@ -491,17 +505,17 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
                return -EINVAL;
        }
 
-       ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
+       gsi_channel_command(channel, GSI_CH_ALLOCATE);
 
-       /* Channel state will normally have been updated */
+       /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);
-       if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
-               dev_err(dev, "channel %u bad state %u after alloc\n",
-                       channel_id, state);
-               ret = -EIO;
-       }
+       if (state == GSI_CHANNEL_STATE_ALLOCATED)
+               return 0;
 
-       return ret;
+       dev_err(dev, "channel %u bad state %u after alloc\n",
+               channel_id, state);
+
+       return -EIO;
 }
 
 /* Start an ALLOCATED channel */
@@ -509,7 +523,6 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
 {
        struct device *dev = channel->gsi->dev;
        enum gsi_channel_state state;
-       int ret;
 
        state = gsi_channel_state(channel);
        if (state != GSI_CHANNEL_STATE_ALLOCATED &&
@@ -519,17 +532,17 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
                return -EINVAL;
        }
 
-       ret = gsi_channel_command(channel, GSI_CH_START);
+       gsi_channel_command(channel, GSI_CH_START);
 
-       /* Channel state will normally have been updated */
+       /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);
-       if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
-               dev_err(dev, "channel %u bad state %u after start\n",
-                       gsi_channel_id(channel), state);
-               ret = -EIO;
-       }
+       if (state == GSI_CHANNEL_STATE_STARTED)
+               return 0;
 
-       return ret;
+       dev_err(dev, "channel %u bad state %u after start\n",
+               gsi_channel_id(channel), state);
+
+       return -EIO;
 }
 
 /* Stop a GSI channel in STARTED state */
@@ -537,7 +550,6 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
 {
        struct device *dev = channel->gsi->dev;
        enum gsi_channel_state state;
-       int ret;
 
        state = gsi_channel_state(channel);
 
@@ -554,12 +566,12 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
                return -EINVAL;
        }
 
-       ret = gsi_channel_command(channel, GSI_CH_STOP);
+       gsi_channel_command(channel, GSI_CH_STOP);
 
-       /* Channel state will normally have been updated */
+       /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);
-       if (ret || state == GSI_CHANNEL_STATE_STOPPED)
-               return ret;
+       if (state == GSI_CHANNEL_STATE_STOPPED)
+               return 0;
 
        /* We may have to try again if stop is in progress */
        if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
@@ -576,7 +588,6 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
 {
        struct device *dev = channel->gsi->dev;
        enum gsi_channel_state state;
-       int ret;
 
        msleep(1);      /* A short delay is required before a RESET command */
 
@@ -590,11 +601,11 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
                return;
        }
 
-       ret = gsi_channel_command(channel, GSI_CH_RESET);
+       gsi_channel_command(channel, GSI_CH_RESET);
 
-       /* Channel state will normally have been updated */
+       /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);
-       if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
+       if (state != GSI_CHANNEL_STATE_ALLOCATED)
                dev_err(dev, "channel %u bad state %u after reset\n",
                        gsi_channel_id(channel), state);
 }
@@ -605,7 +616,6 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
        struct gsi_channel *channel = &gsi->channel[channel_id];
        struct device *dev = gsi->dev;
        enum gsi_channel_state state;
-       int ret;
 
        state = gsi_channel_state(channel);
        if (state != GSI_CHANNEL_STATE_ALLOCATED) {
@@ -614,11 +624,12 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
                return;
        }
 
-       ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);
+       gsi_channel_command(channel, GSI_CH_DE_ALLOC);
 
-       /* Channel state will normally have been updated */
+       /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);
-       if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+
+       if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
                dev_err(dev, "channel %u bad state %u after dealloc\n",
                        channel_id, state);
 }
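
Both GSI command paths gained the same two-step ordering: acknowledge any completion that latched while the source was masked, then unmask only the unit being commanded. A fragment-level sketch of that ordering (register names and the issue/wait helpers are placeholders):

	/* 1. Drop stale pending bits left over from an earlier command. */
	iowrite32(~0, base + IRQ_CLR_OFFSET);

	/* 2. Enable the completion interrupt for just this unit. */
	iowrite32(BIT(unit_id), base + IRQ_MSK_OFFSET);

	/* 3. Issue the command and wait for a now-trustworthy event. */
	issue_command(unit_id, opcode);		/* hypothetical helper */
	wait_for_completion_timeout(&done, timeout);

	/* 4. Mask the source again before returning. */
	iowrite32(0, base + IRQ_MSK_OFFSET);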
index 9dcf16f399b7acd4526679a6b5583d777f561268..135c393437f127261366a66e7fd5fa214f0d1d1d 100644 (file)
@@ -115,13 +115,13 @@ static int ipa_interconnect_enable(struct ipa *ipa)
                return ret;
 
        data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
-       ret = icc_set_bw(clock->memory_path, data->average_rate,
+       ret = icc_set_bw(clock->imem_path, data->average_rate,
                         data->peak_rate);
        if (ret)
                goto err_memory_path_disable;
 
        data = &clock->interconnect_data[IPA_INTERCONNECT_CONFIG];
-       ret = icc_set_bw(clock->memory_path, data->average_rate,
+       ret = icc_set_bw(clock->config_path, data->average_rate,
                         data->peak_rate);
        if (ret)
                goto err_imem_path_disable;
index e34fe2d77324eb658aa302d99a1454a22a1f2600..9b08eb8239846300ffe50359dc435bc7d5c87b15 100644 (file)
@@ -216,6 +216,7 @@ int ipa_modem_start(struct ipa *ipa)
        ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
        ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
 
+       SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
        priv = netdev_priv(netdev);
        priv->ipa = ipa;
 
index 5136275c8e7399fbbd74d048abb89f50929e5af3..d3915f83185430e9db7e6cfb3903af50ba244d7a 100644 (file)
@@ -149,7 +149,7 @@ static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
        return dev_addr;
 }
 
-static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
+int mdiobb_read(struct mii_bus *bus, int phy, int reg)
 {
        struct mdiobb_ctrl *ctrl = bus->priv;
        int ret, i;
@@ -180,8 +180,9 @@ static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
        mdiobb_get_bit(ctrl);
        return ret;
 }
+EXPORT_SYMBOL(mdiobb_read);
 
-static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
 {
        struct mdiobb_ctrl *ctrl = bus->priv;
 
@@ -201,6 +202,7 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
        mdiobb_get_bit(ctrl);
        return 0;
 }
+EXPORT_SYMBOL(mdiobb_write);
 
 struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
 {
index 33372756a451fb45fa29e50a6e74f8965d461839..ddb78fb4d6dc37480e4ac6b131442316a27bcc84 100644 (file)
@@ -317,7 +317,8 @@ static int smsc_phy_probe(struct phy_device *phydev)
        /* Make clk optional to keep DTB backward compatibility. */
        priv->refclk = clk_get_optional(dev, NULL);
        if (IS_ERR(priv->refclk))
-               dev_err_probe(dev, PTR_ERR(priv->refclk), "Failed to request clock\n");
+               return dev_err_probe(dev, PTR_ERR(priv->refclk),
+                                    "Failed to request clock\n");
 
        ret = clk_prepare_enable(priv->refclk);
        if (ret)
index 09c27f7773f9592a44ba41b24712825d4b828913..d445ecb1d0c75f406d971ab5131db6911ef95aff 100644 (file)
@@ -623,6 +623,7 @@ static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
                write_unlock_bh(&pch->upl);
                return -EALREADY;
        }
+       refcount_inc(&pchb->file.refcnt);
        rcu_assign_pointer(pch->bridge, pchb);
        write_unlock_bh(&pch->upl);
 
@@ -632,19 +633,24 @@ static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
                write_unlock_bh(&pchb->upl);
                goto err_unset;
        }
+       refcount_inc(&pch->file.refcnt);
        rcu_assign_pointer(pchb->bridge, pch);
        write_unlock_bh(&pchb->upl);
 
-       refcount_inc(&pch->file.refcnt);
-       refcount_inc(&pchb->file.refcnt);
-
        return 0;
 
 err_unset:
        write_lock_bh(&pch->upl);
+       /* Re-read pch->bridge with upl held in case it was modified concurrently */
+       pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
        RCU_INIT_POINTER(pch->bridge, NULL);
        write_unlock_bh(&pch->upl);
        synchronize_rcu();
+
+       if (pchb)
+               if (refcount_dec_and_test(&pchb->file.refcnt))
+                       ppp_destroy_channel(pchb);
+
        return -EALREADY;
 }
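
The reordering above enforces a general RCU rule: pin an object before publishing a pointer to it, because the moment rcu_assign_pointer() runs, a reader may fetch the pointer and expect the reference to already exist. In sketch form (types illustrative):

	refcount_inc(&peer->file.refcnt);	/* pin first ... */
	rcu_assign_pointer(ch->bridge, peer);	/* ... then publish */

	/* The old order left a window in which a reader could use the
	 * bridge pointer while its refcount still permitted a free.
	 */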
 
index fbed05ae7b0f6e4f5b91213f8fdd4242385e5108..978ac0981d16017d648ebd04f7746fb498db3620 100644 (file)
@@ -1365,7 +1365,7 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
        int i;
 
        if (it->nr_segs > MAX_SKB_FRAGS + 1)
-               return ERR_PTR(-ENOMEM);
+               return ERR_PTR(-EMSGSIZE);
 
        local_bh_disable();
        skb = napi_get_frags(&tfile->napi);
index 1e37190287808973b6d56b190a9dfe848754be47..fbbe7864363190136d95c22d4e09aefa10da8aa0 100644 (file)
@@ -631,7 +631,6 @@ config USB_NET_AQC111
 config USB_RTL8153_ECM
        tristate "RTL8153 ECM support"
        depends on USB_NET_CDCETHER && (USB_RTL8152 || USB_RTL8152=n)
-       default y
        help
          This option supports ECM mode for the RTL8153 ethernet adapter when
          CONFIG_USB_RTL8152 is not set, or the RTL8153 device is not
index 8c1d61c2cbacbca84da2051a77d4f3e0bc943cd3..6aaa0675c28a397c30991375f22256e9209bb2a3 100644 (file)
@@ -793,6 +793,13 @@ static const struct usb_device_id  products[] = {
        .driver_info = 0,
 },
 
+/* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x721e, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
 /* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */
 {
        USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM,
index 2bac57d5e8d50f92d7d9ca13f14689b7bb958bf3..291e76d32abe7c4ee928f8af1e669d9720495667 100644 (file)
@@ -1199,7 +1199,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
         * accordingly. Otherwise, we should check here.
         */
        if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
-               delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
+               delayed_ndp_size = ctx->max_ndp_size +
+                       max_t(u32,
+                             ctx->tx_ndp_modulus,
+                             ctx->tx_modulus + ctx->tx_remainder) - 1;
        else
                delayed_ndp_size = 0;
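
The new reservation can be checked with made-up numbers: with max_ndp_size = 32, tx_ndp_modulus = 4, tx_modulus = 512 and tx_remainder = 0, the old ALIGN(32, 4) reserved only 32 bytes, while the worst case also needs room for datagram padding before the NDP can be placed. A standalone sketch of the arithmetic:

#include <stdio.h>

#define MAX(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int max_ndp_size = 32, tx_ndp_modulus = 4;
	unsigned int tx_modulus = 512, tx_remainder = 0;

	/* NDP size plus worst-case alignment slack ahead of it. */
	unsigned int reserve = max_ndp_size +
			       MAX(tx_ndp_modulus, tx_modulus + tx_remainder) - 1;

	printf("reserve %u bytes\n", reserve);	/* 543 */
	return 0;
}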
 
@@ -1410,7 +1413,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
        if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
            skb_out->len > ctx->min_tx_pkt) {
                padding_count = ctx->tx_curr_size - skb_out->len;
-               skb_put_zero(skb_out, padding_count);
+               if (!WARN_ON(padding_count > ctx->tx_curr_size))
+                       skb_put_zero(skb_out, padding_count);
        } else if (skb_out->len < ctx->tx_curr_size &&
                   (skb_out->len % dev->maxpacket) == 0) {
                skb_put_u8(skb_out, 0); /* force short packet */
@@ -1823,6 +1827,15 @@ cdc_ncm_speed_change(struct usbnet *dev,
        uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
        uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
 
+       /* if the speed hasn't changed, don't report it.
+        * RTL8156 devices shipped before 2021 send a notification about every 32 ms.
+        */
+       if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed)
+               return;
+
+       dev->rx_speed = rx_speed;
+       dev->tx_speed = tx_speed;
+
        /*
         * Currently the USB-NET API does not support reporting the actual
         * device speed, so print it here instead.
@@ -1863,10 +1876,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
                 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
                 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
                 */
-               netif_info(dev, link, dev->net,
-                          "network connection: %sconnected\n",
-                          !!event->wValue ? "" : "dis");
-               usbnet_link_change(dev, !!event->wValue, 0);
+               if (netif_carrier_ok(dev->net) != !!event->wValue)
+                       usbnet_link_change(dev, !!event->wValue, 0);
                break;
 
        case USB_CDC_NOTIFY_SPEED_CHANGE:
index d166c321ee9b6ca78a93ba17a95990978b0a61d6..af19513a9f75b5a72add34788a7a6134d10b2e09 100644 (file)
@@ -1013,6 +1013,7 @@ static const struct usb_device_id products[] = {
        {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)},   /* Quectel EC25, EC20 R2.0  Mini PCIe */
        {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)},   /* Quectel EP06/EG06/EM06 */
        {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)},   /* Quectel EG12/EM12 */
+       {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)},   /* Quectel EM160R-GL */
        {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)},   /* Quectel RM500Q-GL */
 
        /* 3. Combined interface devices matching on interface number */
index c448d608982168826bf25a923b6ffd283c7f5206..67cd6986634fb4552b902559bc628ece68df4cfc 100644 (file)
@@ -6877,6 +6877,7 @@ static const struct usb_device_id rtl8152_table[] = {
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7205)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x720c)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7214)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x721e)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0xa387)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
        {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA,  0x09ff)},
index 2c3fabd38b1635d01f139796869b18c1b133f2fc..20b2df8d74ae1b33d2eeb9ce49015b6fba62acf9 100644 (file)
@@ -122,12 +122,20 @@ static const struct driver_info r8153_info = {
 };
 
 static const struct usb_device_id products[] = {
+/* Realtek RTL8153 Based USB 3.0 Ethernet Adapters */
 {
        USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_REALTEK, 0x8153, USB_CLASS_COMM,
                                      USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
        .driver_info = (unsigned long)&r8153_info,
 },
 
+/* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_LENOVO, 0x721e, USB_CLASS_COMM,
+                                     USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long)&r8153_info,
+},
+
        { },            /* END */
 };
 MODULE_DEVICE_TABLE(usb, products);
index 6609d21ef8942ef044f0b31aaadbd8084924568e..f813ca9dec53167c8a959489b6590c7ab6173474 100644 (file)
@@ -387,7 +387,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
        reply_len = sizeof *phym;
        retval = rndis_query(dev, intf, u.buf,
                             RNDIS_OID_GEN_PHYSICAL_MEDIUM,
-                            0, (void **) &phym, &reply_len);
+                            reply_len, (void **)&phym, &reply_len);
        if (retval != 0 || !phym) {
                /* OID is optional so don't fail here. */
                phym_unspec = cpu_to_le32(RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED);
index 4c41df624dbb1ee0dd68b1fa50d2ee39bc1dad8b..508408fbe78fbd8658dc226834b5b1b334b8b011 100644 (file)
@@ -2093,14 +2093,16 @@ static int virtnet_set_channels(struct net_device *dev,
 
        get_online_cpus();
        err = _virtnet_set_queues(vi, queue_pairs);
-       if (!err) {
-               netif_set_real_num_tx_queues(dev, queue_pairs);
-               netif_set_real_num_rx_queues(dev, queue_pairs);
-
-               virtnet_set_affinity(vi);
+       if (err) {
+               put_online_cpus();
+               goto err;
        }
+       virtnet_set_affinity(vi);
        put_online_cpus();
 
+       netif_set_real_num_tx_queues(dev, queue_pairs);
+       netif_set_real_num_rx_queues(dev, queue_pairs);
+ err:
        return err;
 }
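
The virtio reordering demonstrates two error-path rules at once: release the CPU hotplug lock on every exit, and commit user-visible state (the real queue counts) only after the device has accepted the change. The resulting shape, sketched with a hypothetical apply_queue_pairs() helper:

	get_online_cpus();
	err = apply_queue_pairs(vi, queue_pairs);	/* hypothetical */
	if (err) {
		put_online_cpus();	/* never return with the lock held */
		return err;
	}
	virtnet_set_affinity(vi);
	put_online_cpus();

	/* Only now is it safe to expose the new counts to the stack. */
	netif_set_real_num_tx_queues(dev, queue_pairs);
	netif_set_real_num_rx_queues(dev, queue_pairs);
	return 0;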
 
index 4029fde71a9e5758ab78575205dd63c2eb030fc1..83c9481995dd2f07e10bf88668718101214dd090 100644 (file)
@@ -282,6 +282,7 @@ config SLIC_DS26522
        tristate "Slic Maxim ds26522 card support"
        depends on SPI
        depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
+       select BITREVERSE
        help
          This module initializes and configures the Slic Maxim ds26522 card
          in T1 or E1 mode.
index 64f855651336963eaa2401669f7c422d9e376ffd..261b53fc8e04cb8c141eba8c29ff155b73b069c1 100644 (file)
@@ -569,6 +569,13 @@ static void ppp_timer(struct timer_list *t)
        unsigned long flags;
 
        spin_lock_irqsave(&ppp->lock, flags);
+       /* mod_timer could be called after we entered this function but
+        * before we got the lock.
+        */
+       if (timer_pending(&proto->timer)) {
+               spin_unlock_irqrestore(&ppp->lock, flags);
+               return;
+       }
        switch (proto->state) {
        case STOPPING:
        case REQ_SENT:
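
The timer_pending() check closes a classic race: the timer fires, and before the handler takes the lock another CPU calls mod_timer() to re-arm it; the in-flight expiry is then stale and must be ignored. A self-contained sketch of the guard (example names):

struct example_proto {
	spinlock_t lock;
	struct timer_list timer;
};

static void example_timer_fn(struct timer_list *t)
{
	struct example_proto *proto = from_timer(proto, t, timer);
	unsigned long flags;

	spin_lock_irqsave(&proto->lock, flags);
	if (timer_pending(&proto->timer)) {
		/* Re-armed after firing: this expiry is stale. */
		spin_unlock_irqrestore(&proto->lock, flags);
		return;
	}
	/* ... handle the genuine timeout under the lock ... */
	spin_unlock_irqrestore(&proto->lock, flags);
}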
index b97c38b9a270135c5e4f25d84ea781885bc918c1..350b7913622cb76517c42fde3306acc89ba4799c 100644 (file)
@@ -185,7 +185,7 @@ int ath11k_core_suspend(struct ath11k_base *ab)
        ath11k_hif_ce_irq_disable(ab);
 
        ret = ath11k_hif_suspend(ab);
-       if (!ret) {
+       if (ret) {
                ath11k_warn(ab, "failed to suspend hif: %d\n", ret);
                return ret;
        }
index 205c0f1a40e91d33f84b85f0d68cf4fbf1e3bfb9..920e5026a635fceabfa6f3e388f034dbe886d932 100644 (file)
@@ -2294,6 +2294,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
 {
        u8 channel_num;
        u32 center_freq;
+       struct ieee80211_channel *channel;
 
        rx_status->freq = 0;
        rx_status->rate_idx = 0;
@@ -2314,9 +2315,12 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
                rx_status->band = NL80211_BAND_5GHZ;
        } else {
                spin_lock_bh(&ar->data_lock);
-               rx_status->band = ar->rx_channel->band;
-               channel_num =
-                       ieee80211_frequency_to_channel(ar->rx_channel->center_freq);
+               channel = ar->rx_channel;
+               if (channel) {
+                       rx_status->band = channel->band;
+                       channel_num =
+                               ieee80211_frequency_to_channel(channel->center_freq);
+               }
                spin_unlock_bh(&ar->data_lock);
                ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
                                rx_desc, sizeof(struct hal_rx_desc));
index 5c175e3e09b2850e9d03959c60d1369c80c7b6a0..c1608f64ea95d6b692701e8cf78c1aa9cd8d3872 100644 (file)
@@ -3021,6 +3021,7 @@ static int ath11k_mac_station_add(struct ath11k *ar,
        }
 
        if (ab->hw_params.vdev_start_delay &&
+           !arvif->is_started &&
            arvif->vdev_type != WMI_VDEV_TYPE_AP) {
                ret = ath11k_start_vdev_delay(ar->hw, vif);
                if (ret) {
@@ -5284,7 +5285,8 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
        /* for QCA6390 bss peer must be created before vdev_start */
        if (ab->hw_params.vdev_start_delay &&
            arvif->vdev_type != WMI_VDEV_TYPE_AP &&
-           arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
+           arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+           !ath11k_peer_find_by_vdev_id(ab, arvif->vdev_id)) {
                memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
                ret = 0;
                goto out;
@@ -5295,7 +5297,9 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
                goto out;
        }
 
-       if (ab->hw_params.vdev_start_delay) {
+       if (ab->hw_params.vdev_start_delay &&
+           (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+           arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
                param.vdev_id = arvif->vdev_id;
                param.peer_type = WMI_PEER_TYPE_DEFAULT;
                param.peer_addr = ar->mac_addr;
index 857647aa57c8a7a66c41eaafa430667d062bc88e..20b415cd96c4aadaa233aed4cbabbae6996c4201 100644 (file)
@@ -274,7 +274,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
                                      PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG,
                                      PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
                                      PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
-       if (!ret) {
+       if (ret) {
                ath11k_warn(ab, "failed to set sysclk: %d\n", ret);
                return ret;
        }
@@ -283,7 +283,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG,
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL,
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
-       if (!ret) {
+       if (ret) {
                ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret);
                return ret;
        }
@@ -292,7 +292,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG,
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL,
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
-       if (!ret) {
+       if (ret) {
                ath11k_warn(ab, "failed to set dtct config2: %d\n", ret);
                return ret;
        }
@@ -301,7 +301,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG,
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL,
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
-       if (!ret) {
+       if (ret) {
                ath11k_warn(ab, "failed to set dtct config4: %d\n", ret);
                return ret;
        }
@@ -886,6 +886,32 @@ static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
                pci_disable_device(pci_dev);
 }
 
+static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci)
+{
+       struct ath11k_base *ab = ab_pci->ab;
+
+       pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+                                 &ab_pci->link_ctl);
+
+       ath11k_dbg(ab, ATH11K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n",
+                  ab_pci->link_ctl,
+                  u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
+                  u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
+
+       /* disable L0s and L1 */
+       pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+                                  ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
+
+       set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags);
+}
+
+static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
+{
+       if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags))
+               pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+                                          ab_pci->link_ctl);
+}
+
 static int ath11k_pci_power_up(struct ath11k_base *ab)
 {
        struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -895,6 +921,11 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
        clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
        ath11k_pci_sw_reset(ab_pci->ab, true);
 
+       /* Disable ASPM during firmware download due to problems switching
+        * to AMSS state.
+        */
+       ath11k_pci_aspm_disable(ab_pci);
+
        ret = ath11k_mhi_start(ab_pci);
        if (ret) {
                ath11k_err(ab, "failed to start mhi: %d\n", ret);
@@ -908,6 +939,9 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
 {
        struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
 
+       /* restore aspm in case firmware bootup fails */
+       ath11k_pci_aspm_restore(ab_pci);
+
        ath11k_pci_force_wake(ab_pci->ab);
        ath11k_mhi_stop(ab_pci);
        clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
@@ -965,6 +999,8 @@ static int ath11k_pci_start(struct ath11k_base *ab)
 
        set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
 
+       ath11k_pci_aspm_restore(ab_pci);
+
        ath11k_pci_ce_irqs_enable(ab);
        ath11k_ce_rx_post_buf(ab);
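The pair of helpers added above follows the common PCIe ASPM save/clear/restore shape: read PCI_EXP_LNKCTL once, cache it, clear the ASPM control field for the duration of the firmware download, then write the cached value back. A minimal sketch of the same pattern for a generic PCI driver (the example_* names and the caller-provided save slot are illustrative, not ath11k code):

#include <linux/pci.h>

/* Cache the Link Control register, then turn off L0s and L1. */
static void example_aspm_disable(struct pci_dev *pdev, u16 *saved)
{
	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, saved);
	pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
				   *saved & ~PCI_EXP_LNKCTL_ASPMC);
}

/* Write back whatever ASPM configuration was active before. */
static void example_aspm_restore(struct pci_dev *pdev, u16 saved)
{
	pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, saved);
}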
 
index 0432a702416b42b6dd6bd3046c5c9db61a44b530..fe44d0dfce1956912474eff47f2a86c6ebd0cbb3 100644 (file)
@@ -63,6 +63,7 @@ struct ath11k_msi_config {
 enum ath11k_pci_flags {
        ATH11K_PCI_FLAG_INIT_DONE,
        ATH11K_PCI_FLAG_IS_MSI_64,
+       ATH11K_PCI_ASPM_RESTORE,
 };
 
 struct ath11k_pci {
@@ -80,6 +81,7 @@ struct ath11k_pci {
 
        /* enum ath11k_pci_flags */
        unsigned long flags;
+       u16 link_ctl;
 };
 
 static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
index 1866d82678fa9fc981cfea09946f0825111c3738..b69e7ebfa930327fdf8b10260c312d5888cf43bc 100644 (file)
@@ -76,6 +76,23 @@ struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
        return NULL;
 }
 
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+                                               int vdev_id)
+{
+       struct ath11k_peer *peer;
+
+       spin_lock_bh(&ab->base_lock);
+
+       list_for_each_entry(peer, &ab->peers, list) {
+               if (vdev_id == peer->vdev_id) {
+                       spin_unlock_bh(&ab->base_lock);
+                       return peer;
+               }
+       }
+       spin_unlock_bh(&ab->base_lock);
+       return NULL;
+}
+
 void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
 {
        struct ath11k_peer *peer;
index bba2e00b6944aeb00c31f4b80341512528bb52ab..8553ed061aeaaf12b5598a08385cfcc2018fae74 100644 (file)
@@ -43,5 +43,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
                       struct ieee80211_sta *sta, struct peer_create_params *param);
 int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
                                     const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+                                               int vdev_id);
 
 #endif /* _PEER_H_ */
index f0b5c50974f3e6b6ca04801964c49dfae6c067ae..0db623ff4bb9b33991df8620cb18fb4689e32906 100644 (file)
@@ -1660,6 +1660,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
        struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
        struct qmi_txn txn = {};
        int ret = 0, i;
+       bool delayed;
 
        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
@@ -1672,11 +1673,13 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
         * failure to FW and FW will then request multiple blocks of small
         * chunk size memory.
         */
-       if (!ab->bus_params.fixed_mem_region && ab->qmi.mem_seg_count <= 2) {
+       if (!ab->bus_params.fixed_mem_region && ab->qmi.target_mem_delayed) {
+               delayed = true;
                ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi delays mem_request %d\n",
                           ab->qmi.mem_seg_count);
                memset(req, 0, sizeof(*req));
        } else {
+               delayed = false;
                req->mem_seg_len = ab->qmi.mem_seg_count;
 
                for (i = 0; i < req->mem_seg_len ; i++) {
@@ -1708,6 +1711,12 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
        }
 
        if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+               /* the error response is expected when
+                * target_mem_delayed is true.
+                */
+               if (delayed && resp.resp.error == 0)
+                       goto out;
+
                ath11k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
                            resp.resp.result, resp.resp.error);
                ret = -EINVAL;
@@ -1742,6 +1751,8 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
        int i;
        struct target_mem_chunk *chunk;
 
+       ab->qmi.target_mem_delayed = false;
+
        for (i = 0; i < ab->qmi.mem_seg_count; i++) {
                chunk = &ab->qmi.target_mem[i];
                chunk->vaddr = dma_alloc_coherent(ab->dev,
@@ -1749,6 +1760,15 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
                                                  &chunk->paddr,
                                                  GFP_KERNEL);
                if (!chunk->vaddr) {
+                       if (ab->qmi.mem_seg_count <= 2) {
+                               ath11k_dbg(ab, ATH11K_DBG_QMI,
+                                          "qmi dma allocation failed (%d B type %u), will try later with small size\n",
+                                           chunk->size,
+                                           chunk->type);
+                               ath11k_qmi_free_target_mem_chunk(ab);
+                               ab->qmi.target_mem_delayed = true;
+                               return 0;
+                       }
                        ath11k_err(ab, "failed to alloc memory, size: 0x%x, type: %u\n",
                                   chunk->size,
                                   chunk->type);
@@ -2517,7 +2537,7 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
                                    ret);
                        return;
                }
-       } else if (msg->mem_seg_len > 2) {
+       } else {
                ret = ath11k_qmi_alloc_target_mem_chunk(ab);
                if (ret) {
                        ath11k_warn(ab, "qmi failed to alloc target memory: %d\n",
index 92925c9eac67499656f47b7925c17e4bd9f7e634..7bad374cc23a63c12152d7b12ca1772aca1bee29 100644 (file)
@@ -125,6 +125,7 @@ struct ath11k_qmi {
        struct target_mem_chunk target_mem[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
        u32 mem_seg_count;
        u32 target_mem_mode;
+       bool target_mem_delayed;
        u8 cal_done;
        struct target_info target;
        struct m3_mem_region m3_mem;
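Taken together, the qmi.c hunks above implement a two-round memory negotiation: when the large contiguous allocation fails and the firmware asked for at most two segments, the host frees what it already allocated, sets target_mem_delayed, and answers the request with an empty segment list; the firmware then re-requests the same total as many small chunks. The core decision, reduced to a sketch (everything except dma_alloc_coherent() is illustrative):

#include <linux/dma-mapping.h>

/* Try the single big block; if it fails, flag the caller to report
 * zero segments so the firmware retries with small chunks.
 */
static void *example_alloc_target_mem(struct device *dev, size_t size,
				      dma_addr_t *paddr, bool *delayed)
{
	void *vaddr = dma_alloc_coherent(dev, size, paddr, GFP_KERNEL);

	*delayed = !vaddr;	/* not fatal: this triggers the retry round */
	return vaddr;
}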
index da4b546b62cb5d583d2a28038fba9f4a96f46012..73869d445c5b3a151179539a34cd254c79cb68aa 100644 (file)
@@ -3460,6 +3460,9 @@ int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
        len = sizeof(*cmd);
 
        skb = ath11k_wmi_alloc_skb(wmi_ab, len);
+       if (!skb)
+               return -ENOMEM;
+
        cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data;
 
        cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
index 6a95b199bf626f35e836385d51dd0408b04cf13f..f074e9c31aa22267bdc07a78bb84a5cca59fd8be 100644 (file)
@@ -2,6 +2,7 @@
 config WIL6210
        tristate "Wilocity 60g WiFi card wil6210 support"
        select WANT_DEV_COREDUMP
+       select CRC32
        depends on CFG80211
        depends on PCI
        default n
index ed4635bd151a2deb6fb072a86388c56c97044915..102a8f14c22d4f20ce0126e70311fe10aa9ce419 100644 (file)
@@ -40,9 +40,9 @@ static const struct ieee80211_iface_limit if_limits[] = {
                .types = BIT(NL80211_IFTYPE_ADHOC)
        }, {
                .max = 16,
-               .types = BIT(NL80211_IFTYPE_AP) |
+               .types = BIT(NL80211_IFTYPE_AP)
 #ifdef CONFIG_MAC80211_MESH
-                        BIT(NL80211_IFTYPE_MESH_POINT)
+                        | BIT(NL80211_IFTYPE_MESH_POINT)
 #endif
        }, {
                .max = MT7915_MAX_INTERFACES,
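Note where the '|' ends up: with CONFIG_MAC80211_MESH disabled, the old code left a dangling operator after BIT(NL80211_IFTYPE_AP) and the initializer would not compile. Keeping the operator inside the guard is valid in both configurations:

	.types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
		 | BIT(NL80211_IFTYPE_MESH_POINT)
#endif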
index 62b5b912818fa205cbf2c69bf820cceab0b847f7..0b6facb17ff722772981476943c1a0d3a6f5c2d8 100644 (file)
@@ -157,10 +157,14 @@ static void mt76s_net_worker(struct mt76_worker *w)
 
 static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
-       bool wake, mcu = q == dev->q_mcu[MT_MCUQ_WM];
        struct mt76_queue_entry entry;
        int nframes = 0;
+       bool mcu;
 
+       if (!q)
+               return 0;
+
+       mcu = q == dev->q_mcu[MT_MCUQ_WM];
        while (q->queued > 0) {
                if (!q->entry[q->tail].done)
                        break;
@@ -177,21 +181,12 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
                nframes++;
        }
 
-       wake = q->stopped && q->queued < q->ndesc - 8;
-       if (wake)
-               q->stopped = false;
-
        if (!q->queued)
                wake_up(&dev->tx_wait);
 
-       if (mcu)
-               goto out;
-
-       mt76_txq_schedule(&dev->phy, q->qid);
+       if (!mcu)
+               mt76_txq_schedule(&dev->phy, q->qid);
 
-       if (wake)
-               ieee80211_wake_queue(dev->hw, q->qid);
-out:
        return nframes;
 }
 
index dc850109de22d66a9755a75db5a9164d88dcf557..b95d093728b9b7d0fc9b6a8cd52b87e31ea0bacf 100644 (file)
@@ -811,11 +811,12 @@ static void mt76u_status_worker(struct mt76_worker *w)
        struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
        struct mt76_queue_entry entry;
        struct mt76_queue *q;
-       bool wake;
        int i;
 
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                q = dev->phy.q_tx[i];
+               if (!q)
+                       continue;
 
                while (q->queued > 0) {
                        if (!q->entry[q->tail].done)
@@ -827,10 +828,6 @@ static void mt76u_status_worker(struct mt76_worker *w)
                        mt76_queue_tx_complete(dev, q, &entry);
                }
 
-               wake = q->stopped && q->queued < q->ndesc - 8;
-               if (wake)
-                       q->stopped = false;
-
                if (!q->queued)
                        wake_up(&dev->tx_wait);
 
@@ -839,8 +836,6 @@ static void mt76u_status_worker(struct mt76_worker *w)
                if (dev->drv->tx_status_data &&
                    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
                        queue_work(dev->wq, &dev->usb.stat_work);
-               if (wake)
-                       ieee80211_wake_queue(dev->hw, i);
        }
 }
 
index a7259dbc953da7a0158b6f8398ef8eff4df02e28..965bd95890459313c975dffbc17156f8e660d109 100644 (file)
@@ -78,7 +78,6 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
 
        rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
                "Firmware callback routine entered!\n");
-       complete(&rtlpriv->firmware_loading_complete);
        if (!firmware) {
                if (rtlpriv->cfg->alt_fw_name) {
                        err = request_firmware(&firmware,
@@ -91,13 +90,13 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
                }
                pr_err("Selected firmware is not available\n");
                rtlpriv->max_fw_size = 0;
-               return;
+               goto exit;
        }
 found_alt:
        if (firmware->size > rtlpriv->max_fw_size) {
                pr_err("Firmware is too big!\n");
                release_firmware(firmware);
-               return;
+               goto exit;
        }
        if (!is_wow) {
                memcpy(rtlpriv->rtlhal.pfirmware, firmware->data,
@@ -109,6 +108,9 @@ found_alt:
                rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
        }
        release_firmware(firmware);
+
+exit:
+       complete(&rtlpriv->firmware_loading_complete);
 }
 
 void rtl_fw_cb(const struct firmware *firmware, void *context)
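The rtlwifi fix is about ordering: complete() used to fire at the top of the callback, so a waiter on firmware_loading_complete could proceed, and potentially tear the device down, while the image was still being validated and copied. Routing every exit through one label that signals last is the standard shape; a self-contained sketch with illustrative names:

#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/string.h>

struct example_priv {
	struct completion loading_complete;
	void *fw_buf;
	size_t max_fw_size;
};

static void example_fw_callback(const struct firmware *fw, void *ctx)
{
	struct example_priv *priv = ctx;

	if (!fw)
		goto exit;		/* request failed, nothing to copy */

	if (fw->size > priv->max_fw_size) {
		release_firmware(fw);
		goto exit;
	}

	memcpy(priv->fw_buf, fw->data, fw->size);
	release_firmware(fw);
exit:
	/* Wake waiters only once all processing is finished. */
	complete(&priv->loading_complete);
}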
index ce1b6151944131b3af00c0047ba8f69c858d96f6..200bdd672c281ffc54d7d5bc19be654ca2302d48 100644 (file)
@@ -179,7 +179,7 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
 
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
 {
        int ret;
 
@@ -192,7 +192,6 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
 
 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
 {
@@ -331,7 +330,7 @@ static inline void nvme_end_req(struct request *req)
                req->__sector = nvme_lba_to_sect(req->q->queuedata,
                        le64_to_cpu(nvme_req(req)->result.u64));
 
-       nvme_trace_bio_complete(req, status);
+       nvme_trace_bio_complete(req);
        blk_mq_end_request(req, status);
 }
 
@@ -578,7 +577,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(nvme_alloc_request);
 
-struct request *nvme_alloc_request_qid(struct request_queue *q,
+static struct request *nvme_alloc_request_qid(struct request_queue *q,
                struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
 {
        struct request *req;
@@ -589,7 +588,6 @@ struct request *nvme_alloc_request_qid(struct request_queue *q,
                nvme_init_request(req, cmd);
        return req;
 }
-EXPORT_SYMBOL_GPL(nvme_alloc_request_qid);
 
 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
 {
@@ -2858,6 +2856,11 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
        NULL,
 };
 
+static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
+{
+       return ctrl->opts && ctrl->opts->discovery_nqn;
+}
+
 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
                struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 {
@@ -2877,7 +2880,7 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
                }
 
                if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
-                   (ctrl->opts && ctrl->opts->discovery_nqn))
+                   nvme_discovery_ctrl(ctrl))
                        continue;
 
                dev_err(ctrl->device,
@@ -3146,7 +3149,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
                        goto out_free;
                }
 
-               if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
+               if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
                        dev_err(ctrl->device,
                                "keep-alive support is mandatory for fabrics\n");
                        ret = -EINVAL;
@@ -3186,7 +3189,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
        if (ret < 0)
                return ret;
 
-       if (!ctrl->identified) {
+       if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
                ret = nvme_hwmon_init(ctrl);
                if (ret < 0)
                        return ret;
index 38373a0e86efb5ae4d8c80a371128b87e7611ca0..5f36cfa8136c0c81812fc7315994705678c45273 100644 (file)
@@ -166,6 +166,7 @@ struct nvme_fc_ctrl {
        struct blk_mq_tag_set   admin_tag_set;
        struct blk_mq_tag_set   tag_set;
 
+       struct work_struct      ioerr_work;
        struct delayed_work     connect_work;
 
        struct kref             ref;
@@ -1888,6 +1889,15 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
        }
 }
 
+static void
+nvme_fc_ctrl_ioerr_work(struct work_struct *work)
+{
+       struct nvme_fc_ctrl *ctrl =
+                       container_of(work, struct nvme_fc_ctrl, ioerr_work);
+
+       nvme_fc_error_recovery(ctrl, "transport detected io error");
+}
+
 static void
 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 {
@@ -2046,7 +2056,7 @@ done:
 
 check_error:
        if (terminate_assoc)
-               nvme_fc_error_recovery(ctrl, "transport detected io error");
+               queue_work(nvme_reset_wq, &ctrl->ioerr_work);
 }
 
 static int
@@ -3233,6 +3243,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
 {
        struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
+       cancel_work_sync(&ctrl->ioerr_work);
        cancel_delayed_work_sync(&ctrl->connect_work);
        /*
         * kill the association on the link side.  this will block
@@ -3449,6 +3460,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
        INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
        INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+       INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
        spin_lock_init(&ctrl->lock);
 
        /* io queue count */
@@ -3540,6 +3552,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 fail_ctrl:
        nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+       cancel_work_sync(&ctrl->ioerr_work);
        cancel_work_sync(&ctrl->ctrl.reset_work);
        cancel_delayed_work_sync(&ctrl->connect_work);
 
index 7e49f61f81df8df7986f8ce45f61df402fd8b726..88a6b97247f504c438ed96a80c79e64785344d17 100644 (file)
@@ -610,8 +610,6 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
 #define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
                struct nvme_command *cmd, blk_mq_req_flags_t flags);
-struct request *nvme_alloc_request_qid(struct request_queue *q,
-               struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
 void nvme_cleanup_cmd(struct request *req);
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmd);
@@ -630,7 +628,6 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
 int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
 int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
 
@@ -675,8 +672,7 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
                kblockd_schedule_work(&head->requeue_work);
 }
 
-static inline void nvme_trace_bio_complete(struct request *req,
-        blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
 {
        struct nvme_ns *ns = req->q->queuedata;
 
@@ -731,8 +727,7 @@ static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 {
 }
-static inline void nvme_trace_bio_complete(struct request *req,
-        blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
 {
 }
 static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
index b4385cb0ff609e64a4c2687b5de19e54585785b0..50d9a20568a28df74abd85584259b246dbe67eac 100644 (file)
@@ -967,6 +967,7 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 {
        struct nvme_completion *cqe = &nvmeq->cqes[idx];
+       __u16 command_id = READ_ONCE(cqe->command_id);
        struct request *req;
 
        /*
@@ -975,17 +976,17 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
         * aborts.  We don't even bother to allocate a struct request
         * for them but rather special case them here.
         */
-       if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
+       if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
                nvme_complete_async_event(&nvmeq->dev->ctrl,
                                cqe->status, &cqe->result);
                return;
        }
 
-       req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+       req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
        if (unlikely(!req)) {
                dev_warn(nvmeq->dev->ctrl.device,
                        "invalid id %d completed on queue %d\n",
-                       cqe->command_id, le16_to_cpu(cqe->sq_id));
+                       command_id, le16_to_cpu(cqe->sq_id));
                return;
        }
 
@@ -3196,7 +3197,8 @@ static const struct pci_device_id nvme_id_table[] = {
        { PCI_DEVICE(0x144d, 0xa821),   /* Samsung PM1725 */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
        { PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
-               .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+               .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+                               NVME_QUIRK_IGNORE_DEV_SUBNQN, },
       { PCI_DEVICE(0x1d1d, 0x1f1f),   /* LightNVM qemu device */
                .driver_data = NVME_QUIRK_LIGHTNVM, },
        { PCI_DEVICE(0x1d1d, 0x2807),   /* CNEX WL */
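Earlier in this file, cqe->command_id is now read exactly once into a local before use. The CQE sits in memory the device writes by DMA, so snapshotting the field keeps the AEN check, the tag lookup, and the warning message consistent even if the entry changes underneath; the same idiom in isolation (illustrative helper):

#include <linux/compiler.h>
#include <linux/types.h>

/* Snapshot a DMA-updated field once; every later use then sees the
 * same value regardless of concurrent device writes.
 */
static inline u16 example_snapshot_id(const u16 *dma_field)
{
	return READ_ONCE(*dma_field);
}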
index 1ba65992744277faba5c81fba011b80e5bd7d90c..216619926563ed2c40373ba6981da58d5481c235 100644 (file)
@@ -201,7 +201,7 @@ static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
 
 static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
 {
-       return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
+       return min_t(size_t, iov_iter_single_seg_count(&req->iter),
                        req->pdu_len - req->pdu_sent);
 }
 
@@ -262,6 +262,16 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
        }
 }
 
+static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+{
+       int ret;
+
+       /* drain the send queue as much as we can... */
+       do {
+               ret = nvme_tcp_try_send(queue);
+       } while (ret > 0);
+}
+
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
                bool sync, bool last)
 {
@@ -276,10 +286,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
         * directly, otherwise queue io_work. Also, only do that if we
         * are on the same cpu, so we don't introduce contention.
         */
-       if (queue->io_cpu == smp_processor_id() &&
+       if (queue->io_cpu == __smp_processor_id() &&
            sync && empty && mutex_trylock(&queue->send_mutex)) {
                queue->more_requests = !last;
-               nvme_tcp_try_send(queue);
+               nvme_tcp_send_all(queue);
                queue->more_requests = false;
                mutex_unlock(&queue->send_mutex);
        } else if (last) {
index 733d9363900e4b4d0fe2c3feb2dd570d1d81c842..68213f0a052bbeb977a3df81785f86f35eed0056 100644 (file)
@@ -1501,7 +1501,8 @@ static ssize_t
 fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
 {
-       int opcode, starting, amount;
+       unsigned int opcode;
+       int starting, amount;
 
        if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
                return -EBADRQC;
@@ -1588,8 +1589,8 @@ out_destroy_class:
 
 static void __exit fcloop_exit(void)
 {
-       struct fcloop_lport *lport;
-       struct fcloop_nport *nport;
+       struct fcloop_lport *lport = NULL;
+       struct fcloop_nport *nport = NULL;
        struct fcloop_tport *tport;
        struct fcloop_rport *rport;
        unsigned long flags;
index 5c1e7cb7fe0deec61fcc66f1575c8526b02fcd6d..06b6b742bb213e3ba924ef113d81e4081379b2be 100644 (file)
@@ -1220,6 +1220,14 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
        }
        ndev->inline_data_size = nport->inline_data_size;
        ndev->inline_page_count = inline_page_count;
+
+       if (nport->pi_enable && !(cm_id->device->attrs.device_cap_flags &
+                                 IB_DEVICE_INTEGRITY_HANDOVER)) {
+               pr_warn("T10-PI is not supported by device %s. Disabling it\n",
+                       cm_id->device->name);
+               nport->pi_enable = false;
+       }
+
        ndev->device = cm_id->device;
        kref_init(&ndev->ref);
 
@@ -1641,6 +1649,16 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
        spin_lock_irqsave(&queue->state_lock, flags);
        switch (queue->state) {
        case NVMET_RDMA_Q_CONNECTING:
+               while (!list_empty(&queue->rsp_wait_list)) {
+                       struct nvmet_rdma_rsp *rsp;
+
+                       rsp = list_first_entry(&queue->rsp_wait_list,
+                                              struct nvmet_rdma_rsp,
+                                              wait_list);
+                       list_del(&rsp->wait_list);
+                       nvmet_rdma_put_rsp(rsp);
+               }
+               fallthrough;
        case NVMET_RDMA_Q_LIVE:
                queue->state = NVMET_RDMA_Q_DISCONNECTING;
                disconnect = true;
@@ -1845,14 +1863,6 @@ static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
                goto out_destroy_id;
        }
 
-       if (port->nport->pi_enable &&
-           !(cm_id->device->attrs.device_cap_flags &
-             IB_DEVICE_INTEGRITY_HANDOVER)) {
-               pr_err("T10-PI is not supported for %pISpcs\n", addr);
-               ret = -EINVAL;
-               goto out_destroy_id;
-       }
-
        port->cm_id = cm_id;
        return 0;
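The CONNECTING case above previously fell through to teardown with rsp_wait_list still populated, leaving parked responses to be touched after the queue was gone. Releasing them first uses the stock list-draining idiom; a generic sketch (struct example_rsp and its release helper are illustrative):

#include <linux/list.h>
#include <linux/slab.h>

struct example_rsp {
	struct list_head node;
};

static void example_put_rsp(struct example_rsp *rsp)
{
	kfree(rsp);	/* stand-in for the real refcounted release */
}

/* Pop entries one at a time so each can be released safely. */
static void example_drain_wait_list(struct list_head *wait_list)
{
	while (!list_empty(wait_list)) {
		struct example_rsp *rsp =
			list_first_entry(wait_list, struct example_rsp, node);

		list_del(&rsp->node);
		example_put_rsp(rsp);
	}
}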
 
index 4268eb35991523a539e968c321a9693d5aa70809..8c905aabacc01aeee5d35501525ae4485913ea0e 100644 (file)
@@ -1092,7 +1092,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
        if (IS_ERR(opp_table->clk)) {
                ret = PTR_ERR(opp_table->clk);
                if (ret == -EPROBE_DEFER)
-                       goto err;
+                       goto remove_opp_dev;
 
                dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
        }
@@ -1101,7 +1101,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
        ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
        if (ret) {
                if (ret == -EPROBE_DEFER)
-                       goto err;
+                       goto put_clk;
 
                dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
                         __func__, ret);
@@ -1113,6 +1113,11 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
 
        return opp_table;
 
+put_clk:
+       if (!IS_ERR(opp_table->clk))
+               clk_put(opp_table->clk);
+remove_opp_dev:
+       _remove_opp_dev(opp_dev, opp_table);
 err:
        kfree(opp_table);
        return ERR_PTR(ret);
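The two retargeted gotos make the EPROBE_DEFER paths unwind exactly what had been set up by the time of the failure, in reverse order: put the clock, remove the opp_dev, then free the table. Layered labels are the conventional kernel error-path shape; schematically (the example_* steps are illustrative):

#include <linux/device.h>

int example_get_clk(struct device *dev);
void example_put_clk(struct device *dev);
int example_find_paths(struct device *dev);

static int example_setup(struct device *dev)
{
	int ret;

	ret = example_get_clk(dev);	/* step 1 */
	if (ret)
		goto err;

	ret = example_find_paths(dev);	/* step 2 */
	if (ret)
		goto put_clk;		/* undo only step 1 */

	return 0;

put_clk:
	example_put_clk(dev);
err:
	return ret;
}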
index 794a37d5085376c54b571274f360f9c427d5c5d9..cb2f55f450e4adf05a7b3d81e8b9a01230665584 100644 (file)
@@ -726,11 +726,6 @@ static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
        return per_cpu(hw_events->irq, cpu);
 }
 
-bool arm_pmu_irq_is_nmi(void)
-{
-       return has_nmi;
-}
-
 /*
  * PMU hardware loses all context when a CPU goes offline.
  * When a CPU is hotplugged back in, since some hardware registers are
index 476d7c7fe70a72f2dc0824b2be825fd15a8c351d..f2edef0df40f5cebd5e650cfefe5cac046884838 100644 (file)
@@ -64,6 +64,7 @@ config DP83640_PHY
        depends on NETWORK_PHY_TIMESTAMPING
        depends on PHYLIB
        depends on PTP_1588_CLOCK
+       select CRC32
        help
          Supports the DP83640 PHYTER with IEEE 1588 features.
 
@@ -78,6 +79,7 @@ config DP83640_PHY
 config PTP_1588_CLOCK_INES
        tristate "ZHAW InES PTP time stamping IP core"
        depends on NETWORK_PHY_TIMESTAMPING
+       depends on HAS_IOMEM
        depends on PHYLIB
        depends on PTP_1588_CLOCK
        help
index 53fa84f4d1e197f837e23029c0672c1a87171fcb..5abdd29fb9f333e097852cc8ddba2e95bb5705d9 100644 (file)
@@ -881,6 +881,7 @@ config REGULATOR_QCOM_RPM
 config REGULATOR_QCOM_RPMH
        tristate "Qualcomm Technologies, Inc. RPMh regulator driver"
        depends on QCOM_RPMH || (QCOM_RPMH=n && COMPILE_TEST)
+       depends on QCOM_COMMAND_DB || (QCOM_COMMAND_DB=n && COMPILE_TEST)
        help
          This driver supports control of PMIC regulators via the RPMh hardware
          block found on Qualcomm Technologies Inc. SoCs.  RPMh regulator
index e6d5d98c3ceaba048653ce26ed94f49db74845c6..9309765d0450ec8fe9f1eca04f5b638fd8db2e86 100644 (file)
 #include <linux/regulator/of_regulator.h>
 #include <linux/slab.h>
 
+/* Typical regulator startup times as per data sheet in uS */
+#define BD71847_BUCK1_STARTUP_TIME 144
+#define BD71847_BUCK2_STARTUP_TIME 162
+#define BD71847_BUCK3_STARTUP_TIME 162
+#define BD71847_BUCK4_STARTUP_TIME 240
+#define BD71847_BUCK5_STARTUP_TIME 270
+#define BD71847_BUCK6_STARTUP_TIME 200
+#define BD71847_LDO1_STARTUP_TIME  440
+#define BD71847_LDO2_STARTUP_TIME  370
+#define BD71847_LDO3_STARTUP_TIME  310
+#define BD71847_LDO4_STARTUP_TIME  400
+#define BD71847_LDO5_STARTUP_TIME  530
+#define BD71847_LDO6_STARTUP_TIME  400
+
+#define BD71837_BUCK1_STARTUP_TIME 160
+#define BD71837_BUCK2_STARTUP_TIME 180
+#define BD71837_BUCK3_STARTUP_TIME 180
+#define BD71837_BUCK4_STARTUP_TIME 180
+#define BD71837_BUCK5_STARTUP_TIME 160
+#define BD71837_BUCK6_STARTUP_TIME 240
+#define BD71837_BUCK7_STARTUP_TIME 220
+#define BD71837_BUCK8_STARTUP_TIME 200
+#define BD71837_LDO1_STARTUP_TIME  440
+#define BD71837_LDO2_STARTUP_TIME  370
+#define BD71837_LDO3_STARTUP_TIME  310
+#define BD71837_LDO4_STARTUP_TIME  400
+#define BD71837_LDO5_STARTUP_TIME  310
+#define BD71837_LDO6_STARTUP_TIME  400
+#define BD71837_LDO7_STARTUP_TIME  530
+
 /*
  * BD718(37/47/50) have two "enable control modes". ON/OFF can either be
  * controlled by software - or by PMIC internal HW state machine. Whether
@@ -613,6 +643,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = DVS_BUCK_RUN_MASK,
                        .enable_reg = BD718XX_REG_BUCK1_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71847_BUCK1_STARTUP_TIME,
                        .owner = THIS_MODULE,
                        .of_parse_cb = buck_set_hw_dvs_levels,
                },
@@ -646,6 +677,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = DVS_BUCK_RUN_MASK,
                        .enable_reg = BD718XX_REG_BUCK2_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71847_BUCK2_STARTUP_TIME,
                        .owner = THIS_MODULE,
                        .of_parse_cb = buck_set_hw_dvs_levels,
                },
@@ -680,6 +712,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .linear_range_selectors = bd71847_buck3_volt_range_sel,
                        .enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71847_BUCK3_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -706,6 +739,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_range_mask = BD71847_BUCK4_RANGE_MASK,
                        .linear_range_selectors = bd71847_buck4_volt_range_sel,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71847_BUCK4_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -727,6 +761,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
                        .enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71847_BUCK5_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -750,6 +785,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
                        .enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71847_BUCK6_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -775,6 +811,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .linear_range_selectors = bd718xx_ldo1_volt_range_sel,
                        .enable_reg = BD718XX_REG_LDO1_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71847_LDO1_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -796,6 +833,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .n_voltages = ARRAY_SIZE(ldo_2_volts),
                        .enable_reg = BD718XX_REG_LDO2_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71847_LDO2_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -818,6 +856,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = BD718XX_LDO3_MASK,
                        .enable_reg = BD718XX_REG_LDO3_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71847_LDO3_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -840,6 +879,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = BD718XX_LDO4_MASK,
                        .enable_reg = BD718XX_REG_LDO4_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71847_LDO4_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -865,6 +905,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .linear_range_selectors = bd71847_ldo5_volt_range_sel,
                        .enable_reg = BD718XX_REG_LDO5_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71847_LDO5_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -889,6 +930,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = BD718XX_LDO6_MASK,
                        .enable_reg = BD718XX_REG_LDO6_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71847_LDO6_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -942,6 +984,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = DVS_BUCK_RUN_MASK,
                        .enable_reg = BD718XX_REG_BUCK1_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK1_STARTUP_TIME,
                        .owner = THIS_MODULE,
                        .of_parse_cb = buck_set_hw_dvs_levels,
                },
@@ -975,6 +1018,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = DVS_BUCK_RUN_MASK,
                        .enable_reg = BD718XX_REG_BUCK2_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK2_STARTUP_TIME,
                        .owner = THIS_MODULE,
                        .of_parse_cb = buck_set_hw_dvs_levels,
                },
@@ -1005,6 +1049,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = DVS_BUCK_RUN_MASK,
                        .enable_reg = BD71837_REG_BUCK3_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK3_STARTUP_TIME,
                        .owner = THIS_MODULE,
                        .of_parse_cb = buck_set_hw_dvs_levels,
                },
@@ -1033,6 +1078,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = DVS_BUCK_RUN_MASK,
                        .enable_reg = BD71837_REG_BUCK4_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK4_STARTUP_TIME,
                        .owner = THIS_MODULE,
                        .of_parse_cb = buck_set_hw_dvs_levels,
                },
@@ -1065,6 +1111,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .linear_range_selectors = bd71837_buck5_volt_range_sel,
                        .enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK5_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1088,6 +1135,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD71837_BUCK6_MASK,
                        .enable_reg = BD718XX_REG_2ND_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK6_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1109,6 +1157,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
                        .enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK7_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1132,6 +1181,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
                        .enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK8_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1157,6 +1207,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .linear_range_selectors = bd718xx_ldo1_volt_range_sel,
                        .enable_reg = BD718XX_REG_LDO1_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO1_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1178,6 +1229,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .n_voltages = ARRAY_SIZE(ldo_2_volts),
                        .enable_reg = BD718XX_REG_LDO2_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO2_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1200,6 +1252,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD718XX_LDO3_MASK,
                        .enable_reg = BD718XX_REG_LDO3_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO3_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1222,6 +1275,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD718XX_LDO4_MASK,
                        .enable_reg = BD718XX_REG_LDO4_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO4_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1246,6 +1300,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD71837_LDO5_MASK,
                        .enable_reg = BD718XX_REG_LDO5_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO5_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1272,6 +1327,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD718XX_LDO6_MASK,
                        .enable_reg = BD718XX_REG_LDO6_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO6_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1296,6 +1352,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD71837_LDO7_MASK,
                        .enable_reg = BD71837_REG_LDO7_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO7_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
index 308c27fa6ea80b85e82aba082f898cd910ee2e1b..af9918cd27aa435ef52a2c424811a830eec59795 100644 (file)
@@ -469,13 +469,17 @@ static int pf8x00_i2c_probe(struct i2c_client *client)
 }
 
 static const struct of_device_id pf8x00_dt_ids[] = {
-       { .compatible = "nxp,pf8x00",},
+       { .compatible = "nxp,pf8100",},
+       { .compatible = "nxp,pf8121a",},
+       { .compatible = "nxp,pf8200",},
        { }
 };
 MODULE_DEVICE_TABLE(of, pf8x00_dt_ids);
 
 static const struct i2c_device_id pf8x00_i2c_id[] = {
-       { "pf8x00", 0 },
+       { "pf8100", 0 },
+       { "pf8121a", 0 },
+       { "pf8200", 0 },
        {},
 };
 MODULE_DEVICE_TABLE(i2c, pf8x00_i2c_id);
index fe030ec4b7db41552e5cb499d6b6f2c7e2484dcf..c395a8dda6f7c3a5875600109846da218e7a6e81 100644 (file)
@@ -726,7 +726,7 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
 static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
        .regulator_type = VRM,
        .ops = &rpmh_regulator_vrm_ops,
-       .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 1600),
+       .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000),
        .n_voltages = 5,
        .pmic_mode_map = pmic_mode_map_pmic5_smps,
        .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
index 6f5ddc3eab8c5243f32f7c75e2edeee8dd416cc1..28f637042d444366db28ccddea19145ef6c2d1c4 100644 (file)
@@ -1079,7 +1079,8 @@ struct qeth_card *qeth_get_card_by_busid(char *bus_id);
 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
                              int clear_start_mask);
 int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_set_offline(struct qeth_card *card, bool resetting);
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+                    bool resetting);
 
 int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
                  int (*reply_cb)
index f4b60294a9695205c9bff6887c008382cd569bd3..cf18d87da41e21cbd9e8fbd4703d3c8ed658ad21 100644 (file)
@@ -5507,12 +5507,12 @@ out:
        return rc;
 }
 
-static int qeth_set_online(struct qeth_card *card)
+static int qeth_set_online(struct qeth_card *card,
+                          const struct qeth_discipline *disc)
 {
        bool carrier_ok;
        int rc;
 
-       mutex_lock(&card->discipline_mutex);
        mutex_lock(&card->conf_mutex);
        QETH_CARD_TEXT(card, 2, "setonlin");
 
@@ -5529,7 +5529,7 @@ static int qeth_set_online(struct qeth_card *card)
                /* no need for locking / error handling at this early stage: */
                qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
 
-       rc = card->discipline->set_online(card, carrier_ok);
+       rc = disc->set_online(card, carrier_ok);
        if (rc)
                goto err_online;
 
@@ -5537,7 +5537,6 @@ static int qeth_set_online(struct qeth_card *card)
        kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
 
        mutex_unlock(&card->conf_mutex);
-       mutex_unlock(&card->discipline_mutex);
        return 0;
 
 err_online:
@@ -5552,15 +5551,14 @@ err_hardsetup:
        qdio_free(CARD_DDEV(card));
 
        mutex_unlock(&card->conf_mutex);
-       mutex_unlock(&card->discipline_mutex);
        return rc;
 }
 
-int qeth_set_offline(struct qeth_card *card, bool resetting)
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+                    bool resetting)
 {
        int rc, rc2, rc3;
 
-       mutex_lock(&card->discipline_mutex);
        mutex_lock(&card->conf_mutex);
        QETH_CARD_TEXT(card, 3, "setoffl");
 
@@ -5581,7 +5579,7 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
 
        cancel_work_sync(&card->rx_mode_work);
 
-       card->discipline->set_offline(card);
+       disc->set_offline(card);
 
        qeth_qdio_clear_card(card, 0);
        qeth_drain_output_queues(card);
@@ -5602,16 +5600,19 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
        kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
 
        mutex_unlock(&card->conf_mutex);
-       mutex_unlock(&card->discipline_mutex);
        return 0;
 }
 EXPORT_SYMBOL_GPL(qeth_set_offline);
 
 static int qeth_do_reset(void *data)
 {
+       const struct qeth_discipline *disc;
        struct qeth_card *card = data;
        int rc;
 
+       /* Lock-free, other users will block until we are done. */
+       disc = card->discipline;
+
        QETH_CARD_TEXT(card, 2, "recover1");
        if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
                return 0;
@@ -5619,8 +5620,8 @@ static int qeth_do_reset(void *data)
        dev_warn(&card->gdev->dev,
                 "A recovery process has been started for the device\n");
 
-       qeth_set_offline(card, true);
-       rc = qeth_set_online(card);
+       qeth_set_offline(card, disc, true);
+       rc = qeth_set_online(card, disc);
        if (!rc) {
                dev_info(&card->gdev->dev,
                         "Device successfully recovered!\n");
@@ -6584,6 +6585,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
                break;
        default:
                card->info.layer_enforced = true;
+               /* It's so early that we don't need the discipline_mutex yet. */
                rc = qeth_core_load_discipline(card, enforced_disc);
                if (rc)
                        goto err_load;
@@ -6616,10 +6618,12 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
 
        QETH_CARD_TEXT(card, 2, "removedv");
 
+       mutex_lock(&card->discipline_mutex);
        if (card->discipline) {
                card->discipline->remove(gdev);
                qeth_core_free_discipline(card);
        }
+       mutex_unlock(&card->discipline_mutex);
 
        qeth_free_qdio_queues(card);
 
@@ -6634,6 +6638,7 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
        int rc = 0;
        enum qeth_discipline_id def_discipline;
 
+       mutex_lock(&card->discipline_mutex);
        if (!card->discipline) {
                def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
                                                QETH_DISCIPLINE_LAYER2;
@@ -6647,16 +6652,23 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
                }
        }
 
-       rc = qeth_set_online(card);
+       rc = qeth_set_online(card, card->discipline);
+
 err:
+       mutex_unlock(&card->discipline_mutex);
        return rc;
 }
 
 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
 {
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+       int rc;
 
-       return qeth_set_offline(card, false);
+       mutex_lock(&card->discipline_mutex);
+       rc = qeth_set_offline(card, card->discipline, false);
+       mutex_unlock(&card->discipline_mutex);
+
+       return rc;
 }
 
 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
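The qeth rework moves discipline_mutex out of qeth_set_online()/qeth_set_offline() and into the ccwgroup callbacks, so loading a discipline and bringing the card online happen under one critical section, while the recovery thread, which per the comment above runs lock-free, snapshots card->discipline once and passes it down. In outline, the caller-side locking now looks like this sketch mirroring qeth_core_set_online() above:

static int example_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	mutex_lock(&card->discipline_mutex);
	/* card->discipline cannot change for the whole transition */
	rc = qeth_set_online(card, card->discipline);
	mutex_unlock(&card->discipline_mutex);

	return rc;
}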
index 4ed0fb0705a5203804d52b22dfc2b77094040bef..4254caf1d9b699e062b2e98a1afdcd1eeec579e3 100644 (file)
@@ -2208,7 +2208,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
        wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
 
        if (gdev->state == CCWGROUP_ONLINE)
-               qeth_set_offline(card, false);
+               qeth_set_offline(card, card->discipline, false);
 
        cancel_work_sync(&card->close_dev_work);
        if (card->dev->reg_state == NETREG_REGISTERED)
index d138ac432d0129c1912bedef4e6860e0d614d4bb..4c2cae7ae9a7fab2fcba4ce83e4dfa0e64ee8489 100644 (file)
@@ -1813,7 +1813,7 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
                                                    struct net_device *dev,
                                                    netdev_features_t features)
 {
-       if (qeth_get_ip_version(skb) != 4)
+       if (vlan_get_protocol(skb) != htons(ETH_P_IP))
                features &= ~NETIF_F_HW_VLAN_CTAG_TX;
        return qeth_features_check(skb, dev, features);
 }
@@ -1971,7 +1971,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
        wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
 
        if (cgdev->state == CCWGROUP_ONLINE)
-               qeth_set_offline(card, false);
+               qeth_set_offline(card, card->discipline, false);
 
        cancel_work_sync(&card->close_dev_work);
        if (card->dev->reg_state == NETREG_REGISTERED)
index b206e266b4e7263203c215969fb9b9346c0d16ef..8b0deece9758b8d6bdd0d02dd0b56cb3278bebc9 100644 (file)
@@ -4,6 +4,7 @@ config SCSI_CXGB4_ISCSI
        depends on PCI && INET && (IPV6 || IPV6=n)
        depends on THERMAL || !THERMAL
        depends on ETHERNET
+       depends on TLS || TLS=n
        select NET_VENDOR_CHELSIO
        select CHELSIO_T4
        select CHELSIO_LIB
index 2b28dd405600326de2d3c81e08ada7594efb3a8f..e821dd32dd28088e71119fd16c0bea526c37b2f3 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/debugfs.h>
 #include <linux/dmapool.h>
 #include <linux/iopoll.h>
+#include <linux/irq.h>
 #include <linux/lcm.h>
 #include <linux/libata.h>
 #include <linux/mfd/syscon.h>
@@ -294,6 +295,7 @@ enum {
 
 struct hisi_sas_hw {
        int (*hw_init)(struct hisi_hba *hisi_hba);
+       int (*interrupt_preinit)(struct hisi_hba *hisi_hba);
        void (*setup_itct)(struct hisi_hba *hisi_hba,
                           struct hisi_sas_device *device);
        int (*slot_index_alloc)(struct hisi_hba *hisi_hba,
@@ -393,6 +395,8 @@ struct hisi_hba {
        u32 refclk_frequency_mhz;
        u8 sas_addr[SAS_ADDR_SIZE];
 
+       int *irq_map; /* v2 hw */
+
        int n_phy;
        spinlock_t lock;
        struct semaphore sem;
index b6d4419c32f2723c94d808f0ea7a911632dce562..cf0bfac920a8181d0c51c14d5ba67730a1957f7d 100644 (file)
@@ -2614,6 +2614,13 @@ err_out:
        return NULL;
 }
 
+static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
+{
+       if (hisi_hba->hw->interrupt_preinit)
+               return hisi_hba->hw->interrupt_preinit(hisi_hba);
+       return 0;
+}
+
 int hisi_sas_probe(struct platform_device *pdev,
                   const struct hisi_sas_hw *hw)
 {
@@ -2671,6 +2678,10 @@ int hisi_sas_probe(struct platform_device *pdev,
                sha->sas_port[i] = &hisi_hba->port[i].sas_port;
        }
 
+       rc = hisi_sas_interrupt_preinit(hisi_hba);
+       if (rc)
+               goto err_out_ha;
+
        rc = scsi_add_host(shost, &pdev->dev);
        if (rc)
                goto err_out_ha;
index b57177b52facc3d2b7e55d0de43d23f532e839b8..9adfdefef9caddda06325d871e75ac0333db75fb 100644 (file)
@@ -3302,6 +3302,28 @@ static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
        fatal_axi_int_v2_hw
 };
 
+#define CQ0_IRQ_INDEX (96)
+
+static int hisi_sas_v2_interrupt_preinit(struct hisi_hba *hisi_hba)
+{
+       struct platform_device *pdev = hisi_hba->platform_dev;
+       struct Scsi_Host *shost = hisi_hba->shost;
+       struct irq_affinity desc = {
+               .pre_vectors = CQ0_IRQ_INDEX,
+               .post_vectors = 16,
+       };
+       int resv = desc.pre_vectors + desc.post_vectors, minvec = resv + 1, nvec;
+
+       nvec = devm_platform_get_irqs_affinity(pdev, &desc, minvec, 128,
+                                              &hisi_hba->irq_map);
+       if (nvec < 0)
+               return nvec;
+
+       shost->nr_hw_queues = hisi_hba->cq_nvecs = nvec - resv;
+
+       return 0;
+}
+
 /*
  * There is a limitation in the hip06 chipset that we need
  * to map in all mbigen interrupts, even if they are not used.
@@ -3310,14 +3332,11 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
 {
        struct platform_device *pdev = hisi_hba->platform_dev;
        struct device *dev = &pdev->dev;
-       int irq, rc = 0, irq_map[128];
+       int irq, rc = 0;
        int i, phy_no, fatal_no, queue_no;
 
-       for (i = 0; i < 128; i++)
-               irq_map[i] = platform_get_irq(pdev, i);
-
        for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) {
-               irq = irq_map[i + 1]; /* Phy up/down is irq1 */
+               irq = hisi_hba->irq_map[i + 1]; /* Phy up/down is irq1 */
                rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
                                      DRV_NAME " phy", hisi_hba);
                if (rc) {
@@ -3331,7 +3350,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
        for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
                struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 
-               irq = irq_map[phy_no + 72];
+               irq = hisi_hba->irq_map[phy_no + 72];
                rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
                                      DRV_NAME " sata", phy);
                if (rc) {
@@ -3343,7 +3362,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
        }
 
        for (fatal_no = 0; fatal_no < HISI_SAS_FATAL_INT_NR; fatal_no++) {
-               irq = irq_map[fatal_no + 81];
+               irq = hisi_hba->irq_map[fatal_no + 81];
                rc = devm_request_irq(dev, irq, fatal_interrupts[fatal_no], 0,
                                      DRV_NAME " fatal", hisi_hba);
                if (rc) {
@@ -3354,24 +3373,22 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
                }
        }
 
-       for (queue_no = 0; queue_no < hisi_hba->queue_count; queue_no++) {
+       for (queue_no = 0; queue_no < hisi_hba->cq_nvecs; queue_no++) {
                struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no];
 
-               cq->irq_no = irq_map[queue_no + 96];
+               cq->irq_no = hisi_hba->irq_map[queue_no + 96];
                rc = devm_request_threaded_irq(dev, cq->irq_no,
                                               cq_interrupt_v2_hw,
                                               cq_thread_v2_hw, IRQF_ONESHOT,
                                               DRV_NAME " cq", cq);
                if (rc) {
                        dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
-                               irq, rc);
+                                       cq->irq_no, rc);
                        rc = -ENOENT;
                        goto err_out;
                }
+               cq->irq_mask = irq_get_affinity_mask(cq->irq_no);
        }
-
-       hisi_hba->cq_nvecs = hisi_hba->queue_count;
-
 err_out:
        return rc;
 }
@@ -3529,6 +3546,26 @@ static struct device_attribute *host_attrs_v2_hw[] = {
        NULL
 };
 
+static int map_queues_v2_hw(struct Scsi_Host *shost)
+{
+       struct hisi_hba *hisi_hba = shost_priv(shost);
+       struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+       const struct cpumask *mask;
+       unsigned int queue, cpu;
+
+       for (queue = 0; queue < qmap->nr_queues; queue++) {
+               mask = irq_get_affinity_mask(hisi_hba->irq_map[96 + queue]);
+               if (!mask)
+                       continue;
+
+               for_each_cpu(cpu, mask)
+                       qmap->mq_map[cpu] = qmap->queue_offset + queue;
+       }
+
+       return 0;
+
+}
+
 static struct scsi_host_template sht_v2_hw = {
        .name                   = DRV_NAME,
        .proc_name              = DRV_NAME,
@@ -3553,10 +3590,13 @@ static struct scsi_host_template sht_v2_hw = {
 #endif
        .shost_attrs            = host_attrs_v2_hw,
        .host_reset             = hisi_sas_host_reset,
+       .map_queues             = map_queues_v2_hw,
+       .host_tagset            = 1,
 };
 
 static const struct hisi_sas_hw hisi_sas_v2_hw = {
        .hw_init = hisi_sas_v2_init,
+       .interrupt_preinit = hisi_sas_v2_interrupt_preinit,
        .setup_itct = setup_itct_v2_hw,
        .slot_index_alloc = slot_index_alloc_quirk_v2_hw,
        .alloc_dev = alloc_dev_quirk_v2_hw,
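The v2 hw now asks the platform core for managed-affinity vectors up front (96 pre-vectors and 16 post-vectors stay unmanaged) and then teaches blk-mq the resulting CPU placement via map_queues_v2_hw(). Filling a queue map from an IRQ's affinity mask, pulled out as a standalone sketch (names are illustrative):

#include <linux/blk-mq.h>
#include <linux/cpumask.h>
#include <linux/irq.h>

/* Copy one managed IRQ's affinity mask into the cpu->queue map. */
static void example_fill_qmap(struct blk_mq_queue_map *qmap,
			      unsigned int queue, unsigned int irq)
{
	const struct cpumask *mask = irq_get_affinity_mask(irq);
	unsigned int cpu;

	if (!mask)
		return;		/* vector without managed affinity */

	for_each_cpu(cpu, mask)
		qmap->mq_map[cpu] = qmap->queue_offset + queue;
}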
index 6e4bf05c6d77f6b7bda8a121ca4282075a5b10ec..af192096a82b172ec188d83e2619f8da4216f21d 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/poll.h>
 #include <linux/vmalloc.h>
 #include <linux/irq_poll.h>
+#include <linux/blk-mq-pci.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -113,6 +114,10 @@ unsigned int enable_sdev_max_qd;
 module_param(enable_sdev_max_qd, int, 0444);
 MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
 
+int host_tagset_enable = 1;
+module_param(host_tagset_enable, int, 0444);
+MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable. Default: enable(1)");
+
 MODULE_LICENSE("GPL");
 MODULE_VERSION(MEGASAS_VERSION);
 MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
@@ -3119,6 +3124,19 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
        return 0;
 }
 
+static int megasas_map_queues(struct Scsi_Host *shost)
+{
+       struct megasas_instance *instance;
+
+       instance = (struct megasas_instance *)shost->hostdata;
+
+       if (shost->nr_hw_queues == 1)
+               return 0;
+
+       return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+                       instance->pdev, instance->low_latency_index_start);
+}
+
 static void megasas_aen_polling(struct work_struct *work);
 
 /**
@@ -3427,6 +3445,7 @@ static struct scsi_host_template megasas_template = {
        .eh_timed_out = megasas_reset_timer,
        .shost_attrs = megaraid_host_attrs,
        .bios_param = megasas_bios_param,
+       .map_queues = megasas_map_queues,
        .change_queue_depth = scsi_change_queue_depth,
        .max_segment_size = 0xffffffff,
 };
@@ -6808,6 +6827,26 @@ static int megasas_io_attach(struct megasas_instance *instance)
        host->max_lun = MEGASAS_MAX_LUN;
        host->max_cmd_len = 16;
 
+       /* Use the shared host tagset only for fusion adapters when
+        * managed interrupts are in use (SMP affinity enabled).
+        * kdump runs with a single MSI-X vector, so the shared host
+        * tagset is disabled there as well.
+        */
+
+       host->host_tagset = 0;
+       host->nr_hw_queues = 1;
+
+       if ((instance->adapter_type != MFI_SERIES) &&
+               (instance->msix_vectors > instance->low_latency_index_start) &&
+               host_tagset_enable &&
+               instance->smp_affinity_enable) {
+               host->host_tagset = 1;
+               host->nr_hw_queues = instance->msix_vectors -
+                       instance->low_latency_index_start;
+       }
+
+       dev_info(&instance->pdev->dev,
+               "Max firmware commands: %d shared with nr_hw_queues = %d\n",
+               instance->max_fw_cmds, host->nr_hw_queues);
+
        /*
         * Notify the mid-layer about the new controller
         */
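
For context, host_tagset = 1 tells the mid-layer that can_queue is a single
tag pool shared by all nr_hw_queues rather than a per-queue depth. A hedged
sketch of the template wiring; the name and can_queue value are illustrative:

    static struct scsi_host_template example_template = {
            .name           = "example",
            .map_queues     = megasas_map_queues,   /* from the hunk above */
            .host_tagset    = 1,    /* one tag space across all hw queues */
            .can_queue      = 1024, /* stays the controller-wide depth */
    };
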
index b0c01cf0428f2f8b507421c7251b26a1cfa4540d..fd607287608e1b0623b27ad40bb103c56ce8dd13 100644 (file)
@@ -359,24 +359,29 @@ megasas_get_msix_index(struct megasas_instance *instance,
 {
        int sdev_busy;
 
-       /* nr_hw_queue = 1 for MegaRAID */
-       struct blk_mq_hw_ctx *hctx =
-               scmd->device->request_queue->queue_hw_ctx[0];
-
-       sdev_busy = atomic_read(&hctx->nr_active);
+       /* TBD - if the SCSI mid-layer removes device_busy in the future,
+        * the driver should track this counter in an internal structure.
+        */
+       sdev_busy = atomic_read(&scmd->device->device_busy);
 
        if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
-           sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
+           sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) {
                cmd->request_desc->SCSIIO.MSIxIndex =
                        mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
                                        MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
-       else if (instance->msix_load_balance)
+       } else if (instance->msix_load_balance) {
                cmd->request_desc->SCSIIO.MSIxIndex =
                        (mega_mod64(atomic64_add_return(1, &instance->total_io_count),
                                instance->msix_vectors));
-       else
+       } else if (instance->host->nr_hw_queues > 1) {
+               u32 tag = blk_mq_unique_tag(scmd->request);
+
+               cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) +
+                       instance->low_latency_index_start;
+       } else {
                cmd->request_desc->SCSIIO.MSIxIndex =
                        instance->reply_map[raw_smp_processor_id()];
+       }
 }
 
 /**
@@ -956,9 +961,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
        if (megasas_alloc_cmdlist_fusion(instance))
                goto fail_exit;
 
-       dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
-                instance->max_fw_cmds);
-
        /* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
        io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
        io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
@@ -1102,8 +1104,9 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
                MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
                instance->perf_mode = MR_BALANCED_PERF_MODE;
 
-       dev_info(&instance->pdev->dev, "Performance mode :%s\n",
-               MEGASAS_PERF_MODE_2STR(instance->perf_mode));
+       dev_info(&instance->pdev->dev, "Performance mode :%s (latency index = %d)\n",
+               MEGASAS_PERF_MODE_2STR(instance->perf_mode),
+               instance->low_latency_index_start);
 
        instance->fw_sync_cache_support = (scratch_pad_1 &
                MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
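
A short sketch of the tag decoding used by the new nr_hw_queues > 1 branch:
blk_mq_unique_tag() packs the hardware-queue index into the upper 16 bits
and the per-queue tag into the lower 16 bits, so the submitting queue can be
recovered from the command itself (the helper name is illustrative):

    static u16 example_hwq_of_cmd(struct scsi_cmnd *scmd)
    {
            u32 unique = blk_mq_unique_tag(scmd->request);

            /* lower half would be blk_mq_unique_tag_to_tag(unique) */
            return blk_mq_unique_tag_to_hwq(unique);
    }
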
index 86209455172d69e440b8794b55342d18d065b81e..c299f7e078fb97c79a13d57c1f7918c4fd84e2a5 100644 (file)
@@ -79,5 +79,5 @@ config SCSI_MPT2SAS
        select SCSI_MPT3SAS
        depends on PCI && SCSI
        help
-       Dummy config option for backwards compatiblity: configure the MPT3SAS
+       Dummy config option for backwards compatibility: configure the MPT3SAS
        driver instead.
index 969baf4cd3f5e9e5fab717afc787e3f4e2bad1f5..6e23dc3209feb9ecf58bf45666f33f21f407d025 100644 (file)
@@ -5034,7 +5034,7 @@ _base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc)
 static void
 _base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
 {
-       u16 trigger_flags;
+       int trigger_flags;
 
        /*
         * Default setting of master trigger.
index f5fc7f518f8afea5e05504400450422b0905bc42..47ad64b06623696ca510da02ffafa0d0149256ce 100644 (file)
@@ -2245,7 +2245,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
                             chap_name);
                break;
        case ISCSI_BOOT_TGT_CHAP_SECRET:
-               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
                             chap_secret);
                break;
        case ISCSI_BOOT_TGT_REV_CHAP_NAME:
@@ -2253,7 +2253,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
                             mchap_name);
                break;
        case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
-               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
                             mchap_secret);
                break;
        case ISCSI_BOOT_TGT_FLAGS:
index 24c0f7ec03511b31791ad399f43e10e88b20fb6a..4a08c450b756f3399116e4dc61ae67e4b8c20d53 100644 (file)
@@ -6740,7 +6740,7 @@ static int __init scsi_debug_init(void)
                k = sdeb_zbc_model_str(sdeb_zbc_model_s);
                if (k < 0) {
                        ret = k;
-                       goto free_vm;
+                       goto free_q_arr;
                }
                sdeb_zbc_model = k;
                switch (sdeb_zbc_model) {
@@ -6753,7 +6753,8 @@ static int __init scsi_debug_init(void)
                        break;
                default:
                        pr_err("Invalid ZBC model\n");
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto free_q_arr;
                }
        }
        if (sdeb_zbc_model != BLK_ZONED_NONE) {
index 4848ae3c7b561d0c3b9e9d68c2b0d95bf44b232a..b3f14f05340ad63a9fe5d4377cfb118d57d4a937 100644 (file)
@@ -249,7 +249,8 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 
        req = blk_get_request(sdev->request_queue,
                        data_direction == DMA_TO_DEVICE ?
-                       REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
+                       REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
+                       rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
        if (IS_ERR(req))
                return ret;
        rq = scsi_req(req);
@@ -1206,6 +1207,8 @@ static blk_status_t
 scsi_device_state_check(struct scsi_device *sdev, struct request *req)
 {
        switch (sdev->sdev_state) {
+       case SDEV_CREATED:
+               return BLK_STS_OK;
        case SDEV_OFFLINE:
        case SDEV_TRANSPORT_OFFLINE:
                /*
@@ -1232,18 +1235,18 @@ scsi_device_state_check(struct scsi_device *sdev, struct request *req)
                return BLK_STS_RESOURCE;
        case SDEV_QUIESCE:
                /*
-                * If the devices is blocked we defer normal commands.
+                * If the device is blocked, we only accept power management
+                * commands.
                 */
-               if (req && !(req->rq_flags & RQF_PREEMPT))
+               if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM)))
                        return BLK_STS_RESOURCE;
                return BLK_STS_OK;
        default:
                /*
                 * For any other not fully online state we only allow
-                * special commands.  In particular any user initiated
-                * command is not allowed.
+                * power management commands.
                 */
-               if (req && !(req->rq_flags & RQF_PREEMPT))
+               if (req && !(req->rq_flags & RQF_PM))
                        return BLK_STS_IOERR;
                return BLK_STS_OK;
        }
@@ -2516,15 +2519,13 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
 EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
 
 /**
- *     scsi_device_quiesce - Block user issued commands.
+ *     scsi_device_quiesce - Block all commands except power management.
  *     @sdev:  scsi device to quiesce.
  *
  *     This works by trying to transition to the SDEV_QUIESCE state
  *     (which must be a legal transition).  When the device is in this
- *     state, only special requests will be accepted, all others will
- *     be deferred.  Since special requests may also be requeued requests,
- *     a successful return doesn't guarantee the device will be
- *     totally quiescent.
+ *     state, only power management requests will be accepted, all others will
+ *     be deferred.
  *
  *     Must be called with user context, may sleep.
  *
@@ -2586,12 +2587,12 @@ void scsi_device_resume(struct scsi_device *sdev)
         * device deleted during suspend)
         */
        mutex_lock(&sdev->state_mutex);
+       if (sdev->sdev_state == SDEV_QUIESCE)
+               scsi_device_set_state(sdev, SDEV_RUNNING);
        if (sdev->quiesced_by) {
                sdev->quiesced_by = NULL;
                blk_clear_pm_only(sdev->request_queue);
        }
-       if (sdev->sdev_state == SDEV_QUIESCE)
-               scsi_device_set_state(sdev, SDEV_RUNNING);
        mutex_unlock(&sdev->state_mutex);
 }
 EXPORT_SYMBOL(scsi_device_resume);
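
With RQF_PREEMPT gone from this path, any command that must reach a device
in SDEV_QUIESCE has to carry RQF_PM. A minimal sketch; the opcode, timeout
and retry count are illustrative, not taken from the patch:

    static int example_start_unit(struct scsi_device *sdev)
    {
            unsigned char cmd[6] = { START_STOP, 0, 0, 0, 1 /* START */, 0 };

            /* RQF_PM lets this through a quiesced device; 0 would stall */
            return scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, NULL,
                                30 * HZ, 3, 0, RQF_PM, NULL);
    }
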
index f3d5b1bbd5aa7eff36c5d91cd9d2df6e41a77f8d..c37dd15d16d24f20b6065e6d7141da6c955fd58c 100644 (file)
@@ -117,12 +117,16 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
                sshdr = &sshdr_tmp;
 
        for(i = 0; i < DV_RETRIES; i++) {
+               /*
+                * The purpose of the RQF_PM flag below is to bypass the
+                * SDEV_QUIESCE state.
+                */
                result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense,
                                      sshdr, DV_TIMEOUT, /* retries */ 1,
                                      REQ_FAILFAST_DEV |
                                      REQ_FAILFAST_TRANSPORT |
                                      REQ_FAILFAST_DRIVER,
-                                     0, NULL);
+                                     RQF_PM, NULL);
                if (driver_byte(result) != DRIVER_SENSE ||
                    sshdr->sense_key != UNIT_ATTENTION)
                        break;
@@ -1005,23 +1009,26 @@ spi_dv_device(struct scsi_device *sdev)
         */
        lock_system_sleep();
 
+       if (scsi_autopm_get_device(sdev))
+               goto unlock_system_sleep;
+
        if (unlikely(spi_dv_in_progress(starget)))
-               goto unlock;
+               goto put_autopm;
 
        if (unlikely(scsi_device_get(sdev)))
-               goto unlock;
+               goto put_autopm;
 
        spi_dv_in_progress(starget) = 1;
 
        buffer = kzalloc(len, GFP_KERNEL);
 
        if (unlikely(!buffer))
-               goto out_put;
+               goto put_sdev;
 
        /* We need to verify that the actual device will quiesce; the
         * later target quiesce is just a nice to have */
        if (unlikely(scsi_device_quiesce(sdev)))
-               goto out_free;
+               goto free_buffer;
 
        scsi_target_quiesce(starget);
 
@@ -1041,12 +1048,16 @@ spi_dv_device(struct scsi_device *sdev)
 
        spi_initial_dv(starget) = 1;
 
- out_free:
+free_buffer:
        kfree(buffer);
- out_put:
+
+put_sdev:
        spi_dv_in_progress(starget) = 0;
        scsi_device_put(sdev);
-unlock:
+put_autopm:
+       scsi_autopm_put_device(sdev);
+
+unlock_system_sleep:
        unlock_system_sleep();
 }
 EXPORT_SYMBOL(spi_dv_device);
index 679c2c02504763ba31a21c425d58bdfca02baba8..a3d2d4bc4a3dcda79a3b1368bc397eccc6a90549 100644 (file)
@@ -984,8 +984,10 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
                }
        }
 
-       if (sdp->no_write_same)
+       if (sdp->no_write_same) {
+               rq->rq_flags |= RQF_QUIET;
                return BLK_STS_TARGET;
+       }
 
        if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
                return sd_setup_write_same16_cmnd(cmd, false);
@@ -3510,10 +3512,8 @@ static int sd_probe(struct device *dev)
 static int sd_remove(struct device *dev)
 {
        struct scsi_disk *sdkp;
-       dev_t devt;
 
        sdkp = dev_get_drvdata(dev);
-       devt = disk_devt(sdkp->disk);
        scsi_autopm_get_device(sdkp->device);
 
        async_synchronize_full_domain(&scsi_sd_pm_domain);
index fd6f84c1b4e2256454b52c0b9373c6c09a9fe9e8..895e82ea6ece551d0a5c5fa5c6ba8cf3ee3452e0 100644 (file)
@@ -31,6 +31,6 @@ TRACE_EVENT(ufs_mtk_event,
 
 #undef TRACE_INCLUDE_PATH
 #undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/scsi/ufs/
 #define TRACE_INCLUDE_FILE ufs-mediatek-trace
 #include <trace/define_trace.h>
index 3522458db3bbd0ce479d8b57c6b345ad70413dfc..80618af7c87203b256ce26087579fd582038847e 100644 (file)
@@ -70,6 +70,13 @@ static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
        return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
 }
 
+static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
+{
+       struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+       return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
+}
+
 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
 {
        u32 tmp;
@@ -514,6 +521,9 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
        if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
                host->caps |= UFS_MTK_CAP_DISABLE_AH8;
 
+       if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
+               host->caps |= UFS_MTK_CAP_BROKEN_VCC;
+
        dev_info(hba->dev, "caps: 0x%x", host->caps);
 }
 
@@ -1003,6 +1013,17 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
 {
        ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
+
+       if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
+           (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
+               hba->vreg_info.vcc->always_on = true;
+               /*
+                * VCC is kept always-on, so no delay is needed
+                * during regulator operations.
+                */
+               hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
+                       UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
+       }
 }
 
 static void ufs_mtk_event_notify(struct ufs_hba *hba,
index 93d35097dfb0aeb03c7eaad9e1405527edd51ec2..3f0d3bb769e89b4a4a2543e952580f59e30cf215 100644 (file)
@@ -81,6 +81,7 @@ enum ufs_mtk_host_caps {
        UFS_MTK_CAP_BOOST_CRYPT_ENGINE         = 1 << 0,
        UFS_MTK_CAP_VA09_PWR_CTRL              = 1 << 1,
        UFS_MTK_CAP_DISABLE_AH8                = 1 << 2,
+       UFS_MTK_CAP_BROKEN_VCC                 = 1 << 3,
 };
 
 struct ufs_mtk_crypt_cfg {
index d593edb487677189e45bc2a51b7436ba333df083..14dfda735adf5ac2c2a06d779724f706adbfee62 100644 (file)
@@ -330,7 +330,6 @@ enum {
        UFS_DEV_WRITE_BOOSTER_SUP       = BIT(8),
 };
 
-#define POWER_DESC_MAX_SIZE                    0x62
 #define POWER_DESC_MAX_ACTV_ICC_LVLS           16
 
 /* Attribute  bActiveICCLevel parameter bit masks definitions */
@@ -513,6 +512,7 @@ struct ufs_query_res {
 struct ufs_vreg {
        struct regulator *reg;
        const char *name;
+       bool always_on;
        bool enabled;
        int min_uV;
        int max_uV;
index df3a564c3e334875ed9a2eab331344f3468031b1..fadd566025b86ab4ffe18aefd366823ea6f1ae98 100644 (file)
@@ -148,6 +148,8 @@ static int ufs_intel_common_init(struct ufs_hba *hba)
 {
        struct intel_host *host;
 
+       hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
        host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;
@@ -163,6 +165,41 @@ static void ufs_intel_common_exit(struct ufs_hba *hba)
        intel_ltr_hide(hba->dev);
 }
 
+static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
+{
+       /*
+        * To support S4 (suspend-to-disk) with spm_lvl other than 5, the base
+        * address registers must be restored because the restore kernel may
+        * have used different addresses.
+        */
+       ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
+                     REG_UTP_TRANSFER_REQ_LIST_BASE_L);
+       ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
+                     REG_UTP_TRANSFER_REQ_LIST_BASE_H);
+       ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
+                     REG_UTP_TASK_REQ_LIST_BASE_L);
+       ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
+                     REG_UTP_TASK_REQ_LIST_BASE_H);
+
+       if (ufshcd_is_link_hibern8(hba)) {
+               int ret = ufshcd_uic_hibern8_exit(hba);
+
+               if (!ret) {
+                       ufshcd_set_link_active(hba);
+               } else {
+                       dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+                               __func__, ret);
+                       /*
+                        * Force reset and restore. Any other actions can lead
+                        * to an unrecoverable state.
+                        */
+                       ufshcd_set_link_off(hba);
+               }
+       }
+
+       return 0;
+}
+
 static int ufs_intel_ehl_init(struct ufs_hba *hba)
 {
        hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
@@ -174,6 +211,7 @@ static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
        .init                   = ufs_intel_common_init,
        .exit                   = ufs_intel_common_exit,
        .link_startup_notify    = ufs_intel_link_startup_notify,
+       .resume                 = ufs_intel_resume,
 };
 
 static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
@@ -181,6 +219,7 @@ static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
        .init                   = ufs_intel_ehl_init,
        .exit                   = ufs_intel_common_exit,
        .link_startup_notify    = ufs_intel_link_startup_notify,
+       .resume                 = ufs_intel_resume,
 };
 
 #ifdef CONFIG_PM_SLEEP
@@ -207,6 +246,30 @@ static int ufshcd_pci_resume(struct device *dev)
 {
        return ufshcd_system_resume(dev_get_drvdata(dev));
 }
+
+/**
+ * ufshcd_pci_poweroff - suspend-to-disk poweroff function
+ * @dev: pointer to PCI device handle
+ *
+ * Return: 0 if successful, non-zero otherwise
+ */
+static int ufshcd_pci_poweroff(struct device *dev)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       int spm_lvl = hba->spm_lvl;
+       int ret;
+
+       /*
+        * For poweroff we need to set the UFS device to PowerDown mode.
+        * Force spm_lvl to ensure that.
+        */
+       hba->spm_lvl = 5;
+       ret = ufshcd_system_suspend(hba);
+       hba->spm_lvl = spm_lvl;
+       return ret;
+}
+
 #endif /* !CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_PM
@@ -302,8 +365,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 }
 
 static const struct dev_pm_ops ufshcd_pci_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend,
-                               ufshcd_pci_resume)
+#ifdef CONFIG_PM_SLEEP
+       .suspend        = ufshcd_pci_suspend,
+       .resume         = ufshcd_pci_resume,
+       .freeze         = ufshcd_pci_suspend,
+       .thaw           = ufshcd_pci_resume,
+       .poweroff       = ufshcd_pci_poweroff,
+       .restore        = ufshcd_pci_resume,
+#endif
        SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
                           ufshcd_pci_runtime_resume,
                           ufshcd_pci_runtime_idle)
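
The sleep callbacks above are open-coded because SET_SYSTEM_SLEEP_PM_OPS()
reuses one pair of functions for every transition; under CONFIG_PM_SLEEP it
expands to roughly the following, which leaves no hook for a distinct
poweroff handler that forces spm_lvl = 5:

    static const struct dev_pm_ops example_pm_ops = {
            .suspend  = ufshcd_pci_suspend,  .resume  = ufshcd_pci_resume,
            .freeze   = ufshcd_pci_suspend,  .thaw    = ufshcd_pci_resume,
            .poweroff = ufshcd_pci_suspend,  .restore = ufshcd_pci_resume,
    };
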
index 9902b7e3aa4aaf49bcd266bfa4b12e09d84e7763..e31d2c5c7b23b7d152322723fdc449eb5eb5400d 100644 (file)
@@ -225,6 +225,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba);
 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
 static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_clear_ua_wluns(struct ufs_hba *hba);
 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
@@ -288,7 +289,8 @@ static inline void ufshcd_wb_config(struct ufs_hba *hba)
        if (ret)
                dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
                        __func__, ret);
-       ufshcd_wb_toggle_flush(hba, true);
+       if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
+               ufshcd_wb_toggle_flush(hba, true);
 }
 
 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
@@ -580,6 +582,23 @@ static void ufshcd_print_pwr_info(struct ufs_hba *hba)
                 hba->pwr_info.hs_rate);
 }
 
+static void ufshcd_device_reset(struct ufs_hba *hba)
+{
+       int err;
+
+       err = ufshcd_vops_device_reset(hba);
+
+       if (!err) {
+               ufshcd_set_ufs_dev_active(hba);
+               if (ufshcd_is_wb_allowed(hba)) {
+                       hba->wb_enabled = false;
+                       hba->wb_buf_flush_enabled = false;
+               }
+       }
+       if (err != -EOPNOTSUPP)
+               ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
+}
+
 void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
 {
        if (!us)
@@ -3665,7 +3684,7 @@ static int ufshcd_dme_enable(struct ufs_hba *hba)
        ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
        if (ret)
                dev_err(hba->dev,
-                       "dme-reset: error code %d\n", ret);
+                       "dme-enable: error code %d\n", ret);
 
        return ret;
 }
@@ -3964,7 +3983,7 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
        /* Reset the attached device */
-       ufshcd_vops_device_reset(hba);
+       ufshcd_device_reset(hba);
 
        ret = ufshcd_host_reset_and_restore(hba);
 
@@ -5418,9 +5437,6 @@ static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
 
 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
 {
-       if (hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)
-               return;
-
        if (enable)
                ufshcd_wb_buf_flush_enable(hba);
        else
@@ -6643,19 +6659,16 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
        struct Scsi_Host *host;
        struct ufs_hba *hba;
-       unsigned int tag;
        u32 pos;
        int err;
-       u8 resp = 0xF;
-       struct ufshcd_lrb *lrbp;
+       u8 resp = 0xF, lun;
        unsigned long flags;
 
        host = cmd->device->host;
        hba = shost_priv(host);
-       tag = cmd->request->tag;
 
-       lrbp = &hba->lrb[tag];
-       err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
+       lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
+       err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
        if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
                if (!err)
                        err = resp;
@@ -6664,7 +6677,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 
        /* clear the commands that were pending for corresponding LUN */
        for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
-               if (hba->lrb[pos].lun == lrbp->lun) {
+               if (hba->lrb[pos].lun == lun) {
                        err = ufshcd_clear_cmd(hba, pos);
                        if (err)
                                break;
@@ -6930,7 +6943,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 
        /* Establish the link again and restore the device */
        err = ufshcd_probe_hba(hba, false);
-
+       if (!err)
+               ufshcd_clear_ua_wluns(hba);
 out:
        if (err)
                dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
@@ -6968,7 +6982,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
 
        do {
                /* Reset the attached device */
-               ufshcd_vops_device_reset(hba);
+               ufshcd_device_reset(hba);
 
                err = ufshcd_host_reset_and_restore(hba);
        } while (err && --retries);
@@ -8045,7 +8059,7 @@ static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
 {
        int ret = 0;
 
-       if (!vreg || !vreg->enabled)
+       if (!vreg || !vreg->enabled || vreg->always_on)
                goto out;
 
        ret = regulator_disable(vreg->reg);
@@ -8414,13 +8428,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
         * handling context.
         */
        hba->host->eh_noresume = 1;
-       if (hba->wlun_dev_clr_ua) {
-               ret = ufshcd_send_request_sense(hba, sdp);
-               if (ret)
-                       goto out;
-               /* Unit attention condition is cleared now */
-               hba->wlun_dev_clr_ua = false;
-       }
+       ufshcd_clear_ua_wluns(hba);
 
        cmd[4] = pwr_mode << 4;
 
@@ -8441,7 +8449,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
 
        if (!ret)
                hba->curr_dev_pwr_mode = pwr_mode;
-out:
+
        scsi_device_put(sdp);
        hba->host->eh_noresume = 0;
        return ret;
@@ -8685,6 +8693,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                        ufshcd_wb_need_flush(hba));
        }
 
+       flush_work(&hba->eeh_work);
+
        if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
                if (!ufshcd_is_runtime_pm(pm_op))
                        /* ensure that bkops is disabled */
@@ -8697,8 +8707,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                }
        }
 
-       flush_work(&hba->eeh_work);
-
        /*
         * In the case of DeepSleep, the device is expected to remain powered
         * with the link off, so do not check for bkops.
@@ -8747,7 +8755,7 @@ set_link_active:
         * further below.
         */
        if (ufshcd_is_ufs_dev_deepsleep(hba)) {
-               ufshcd_vops_device_reset(hba);
+               ufshcd_device_reset(hba);
                WARN_ON(!ufshcd_is_link_off(hba));
        }
        if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
@@ -8757,7 +8765,7 @@ set_link_active:
 set_dev_active:
        /* Can also get here needing to exit DeepSleep */
        if (ufshcd_is_ufs_dev_deepsleep(hba)) {
-               ufshcd_vops_device_reset(hba);
+               ufshcd_device_reset(hba);
                ufshcd_host_reset_and_restore(hba);
        }
        if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
@@ -8925,7 +8933,8 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
        if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
             hba->curr_dev_pwr_mode) &&
            (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
-            hba->uic_link_state))
+            hba->uic_link_state) &&
+            !hba->dev_info.b_rpm_dev_flush_capable)
                goto out;
 
        if (pm_runtime_suspended(hba->dev)) {
@@ -9353,7 +9362,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        }
 
        /* Reset the attached device */
-       ufshcd_vops_device_reset(hba);
+       ufshcd_device_reset(hba);
 
        ufshcd_init_crypto(hba);
 
index f8c2467dc0142b47f47d81123fc73fc951ff2d2a..aa9ea355232395f4915830119a0e37698444e7cb 100644 (file)
@@ -1218,16 +1218,12 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
                hba->vops->dbg_register_dump(hba);
 }
 
-static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
+static inline int ufshcd_vops_device_reset(struct ufs_hba *hba)
 {
-       if (hba->vops && hba->vops->device_reset) {
-               int err = hba->vops->device_reset(hba);
-
-               if (!err)
-                       ufshcd_set_ufs_dev_active(hba);
-               if (err != -EOPNOTSUPP)
-                       ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
-       }
+       if (hba->vops && hba->vops->device_reset)
+               return hba->vops->device_reset(hba);
+
+       return -EOPNOTSUPP;
 }
 
 static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
index 809bfff3690ab3702e3ae3237a72a39604c772ab..cbc4c28c1541cba3370661b2e7015a15c04b19be 100644 (file)
@@ -189,24 +189,26 @@ static int altera_spi_txrx(struct spi_master *master,
 
                /* send the first byte */
                altera_spi_tx_word(hw);
-       } else {
-               while (hw->count < hw->len) {
-                       altera_spi_tx_word(hw);
 
-                       for (;;) {
-                               altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
-                               if (val & ALTERA_SPI_STATUS_RRDY_MSK)
-                                       break;
+               return 1;
+       }
+
+       while (hw->count < hw->len) {
+               altera_spi_tx_word(hw);
 
-                               cpu_relax();
-                       }
+               for (;;) {
+                       altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
+                       if (val & ALTERA_SPI_STATUS_RRDY_MSK)
+                               break;
 
-                       altera_spi_rx_word(hw);
+                       cpu_relax();
                }
-               spi_finalize_current_transfer(master);
+
+               altera_spi_rx_word(hw);
        }
+       spi_finalize_current_transfer(master);
 
-       return t->len;
+       return 0;
 }
 
 static irqreturn_t altera_spi_irq(int irq, void *dev)
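
The restructuring above leans on the SPI core's transfer_one() contract: a
positive return means the transfer is still in flight and the driver will
call spi_finalize_current_transfer() later, 0 means it already finished, and
a negative value is an error. A hedged sketch; irq_driven() and
do_polled_io() are hypothetical helpers:

    static int example_transfer_one(struct spi_master *master,
                                    struct spi_device *spi,
                                    struct spi_transfer *t)
    {
            if (irq_driven(master))
                    return 1;       /* IRQ handler finalizes later */

            do_polled_io(master, t);
            spi_finalize_current_transfer(master);
            return 0;               /* complete on return */
    }
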
index 70467b9d61baa3d13e3851e3abd25a36b2f5919e..a3afd1b9ac567bcc48c809edef5d0509ae5dd0aa 100644 (file)
@@ -115,6 +115,7 @@ struct cdns_spi {
        void __iomem *regs;
        struct clk *ref_clk;
        struct clk *pclk;
+       unsigned int clk_rate;
        u32 speed_hz;
        const u8 *txbuf;
        u8 *rxbuf;
@@ -250,7 +251,7 @@ static void cdns_spi_config_clock_freq(struct spi_device *spi,
        u32 ctrl_reg, baud_rate_val;
        unsigned long frequency;
 
-       frequency = clk_get_rate(xspi->ref_clk);
+       frequency = xspi->clk_rate;
 
        ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
 
@@ -558,8 +559,9 @@ static int cdns_spi_probe(struct platform_device *pdev)
        master->auto_runtime_pm = true;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 
+       xspi->clk_rate = clk_get_rate(xspi->ref_clk);
        /* Set to default valid value */
-       master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4;
+       master->max_speed_hz = xspi->clk_rate / 4;
        xspi->speed_hz = master->max_speed_hz;
 
        master->bits_per_word_mask = SPI_BPW_MASK(8);
index 9494257e1c33f56173dfefb7ef7c2b49100adc01..6d8e0a05a53554b4393752a6a6cad93b6276d548 100644 (file)
@@ -115,14 +115,13 @@ static void fsl_spi_chipselect(struct spi_device *spi, int value)
 {
        struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
        struct fsl_spi_platform_data *pdata;
-       bool pol = spi->mode & SPI_CS_HIGH;
        struct spi_mpc8xxx_cs   *cs = spi->controller_state;
 
        pdata = spi->dev.parent->parent->platform_data;
 
        if (value == BITBANG_CS_INACTIVE) {
                if (pdata->cs_control)
-                       pdata->cs_control(spi, !pol);
+                       pdata->cs_control(spi, false);
        }
 
        if (value == BITBANG_CS_ACTIVE) {
@@ -134,7 +133,7 @@ static void fsl_spi_chipselect(struct spi_device *spi, int value)
                fsl_spi_change_mode(spi);
 
                if (pdata->cs_control)
-                       pdata->cs_control(spi, pol);
+                       pdata->cs_control(spi, true);
        }
 }
 
index 512e925d5ea48b8609f770626e1df95b92cbf98f..881f645661cc6137fd20cc7376e90b57e265a9f1 100644 (file)
@@ -83,6 +83,7 @@ struct spi_geni_master {
        spinlock_t lock;
        int irq;
        bool cs_flag;
+       bool abort_failed;
 };
 
 static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -141,8 +142,49 @@ static void handle_fifo_timeout(struct spi_master *spi,
        spin_unlock_irq(&mas->lock);
 
        time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
-       if (!time_left)
+       if (!time_left) {
                dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
+
+               /*
+                * No need for a lock since SPI core has a lock and we never
+                * access this from an interrupt.
+                */
+               mas->abort_failed = true;
+       }
+}
+
+static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
+{
+       struct geni_se *se = &mas->se;
+       u32 m_irq, m_irq_en;
+
+       if (!mas->abort_failed)
+               return false;
+
+       /*
+        * The only known case where a transfer times out, then a cancel
+        * times out, and then an abort also times out is when something
+        * is blocking our interrupt handler from running.  Avoid starting
+        * any new transfers until that sorts itself out.
+        */
+       spin_lock_irq(&mas->lock);
+       m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
+       m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
+       spin_unlock_irq(&mas->lock);
+
+       if (m_irq & m_irq_en) {
+               dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
+                       m_irq & m_irq_en);
+               return true;
+       }
+
+       /*
+        * If we get here, the problem resolved itself, so there is no need
+        * to check again on future transfers.
+        */
+       mas->abort_failed = false;
+
+       return false;
 }
 
 static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
@@ -158,10 +200,21 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
        if (set_flag == mas->cs_flag)
                return;
 
-       mas->cs_flag = set_flag;
-
        pm_runtime_get_sync(mas->dev);
+
+       if (spi_geni_is_abort_still_pending(mas)) {
+               dev_err(mas->dev, "Can't set chip select\n");
+               goto exit;
+       }
+
        spin_lock_irq(&mas->lock);
+       if (mas->cur_xfer) {
+               dev_err(mas->dev, "Can't set CS when prev xfer running\n");
+               spin_unlock_irq(&mas->lock);
+               goto exit;
+       }
+
+       mas->cs_flag = set_flag;
        reinit_completion(&mas->cs_done);
        if (set_flag)
                geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
@@ -170,9 +223,12 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
        spin_unlock_irq(&mas->lock);
 
        time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
-       if (!time_left)
+       if (!time_left) {
+               dev_warn(mas->dev, "Timeout setting chip select\n");
                handle_fifo_timeout(spi, NULL);
+       }
 
+exit:
        pm_runtime_put(mas->dev);
 }
 
@@ -280,6 +336,9 @@ static int spi_geni_prepare_message(struct spi_master *spi,
        int ret;
        struct spi_geni_master *mas = spi_master_get_devdata(spi);
 
+       if (spi_geni_is_abort_still_pending(mas))
+               return -EBUSY;
+
        ret = setup_fifo_params(spi_msg->spi, spi);
        if (ret)
                dev_err(mas->dev, "Couldn't select mode %d\n", ret);
@@ -354,6 +413,12 @@ static bool geni_spi_handle_tx(struct spi_geni_master *mas)
        unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
        unsigned int i = 0;
 
+       /* Stop the watermark IRQ if nothing to send */
+       if (!mas->cur_xfer) {
+               writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+               return false;
+       }
+
        max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
        if (mas->tx_rem_bytes < max_bytes)
                max_bytes = mas->tx_rem_bytes;
@@ -396,6 +461,14 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
                if (rx_last_byte_valid && rx_last_byte_valid < 4)
                        rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
        }
+
+       /* Clear out the FIFO and bail if nowhere to put it */
+       if (!mas->cur_xfer) {
+               for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
+                       readl(se->base + SE_GENI_RX_FIFOn);
+               return;
+       }
+
        if (mas->rx_rem_bytes < rx_bytes)
                rx_bytes = mas->rx_rem_bytes;
 
@@ -495,6 +568,9 @@ static int spi_geni_transfer_one(struct spi_master *spi,
 {
        struct spi_geni_master *mas = spi_master_get_devdata(spi);
 
+       if (spi_geni_is_abort_still_pending(mas))
+               return -EBUSY;
+
        /* Terminate and return success for 0 byte length transfer */
        if (!xfer->len)
                return 0;
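
A compact sketch of the stuck-handler test that gates these entry points;
the register names come from qcom-geni-se.h, as in the hunk above:

    static bool example_irq_still_pending(struct geni_se *se)
    {
            u32 status = readl(se->base + SE_GENI_M_IRQ_STATUS);
            u32 enabled = readl(se->base + SE_GENI_M_IRQ_EN);

            /* asserted and enabled yet unserviced: handler never ran */
            return status & enabled;
    }
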
index 471dedf3d3392e7b682e7746110ee2488012459d..6017209c6d2f7b42eba482256448172933ac8be5 100644 (file)
@@ -493,9 +493,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
 
        /* align packet size with data registers access */
        if (spi->cur_bpw > 8)
-               fthlv -= (fthlv % 2); /* multiple of 2 */
+               fthlv += (fthlv % 2) ? 1 : 0;
        else
-               fthlv -= (fthlv % 4); /* multiple of 4 */
+               fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0;
 
        if (!fthlv)
                fthlv = 1;
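
The fix flips the threshold from rounding down to rounding up. Since 2 and
4 are both powers of two, an equivalent hedged rewrite could use the
kernel's ALIGN() helper; this is not the patch's code, just the same
arithmetic:

    static u32 example_align_fthlv(u32 fthlv, u32 cur_bpw)
    {
            /* round up to the data-register access width, never down */
            return ALIGN(fthlv, cur_bpw > 8 ? 2 : 4);
    }
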
index 51d7c004fbab5eee868e9abb1defad59a5df6e0d..720ab34784c1d7c7bc430f0212706902dca62359 100644 (file)
@@ -1108,6 +1108,7 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
 {
        struct spi_statistics *statm = &ctlr->statistics;
        struct spi_statistics *stats = &msg->spi->statistics;
+       u32 speed_hz = xfer->speed_hz;
        unsigned long long ms;
 
        if (spi_controller_is_slave(ctlr)) {
@@ -1116,8 +1117,11 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
                        return -EINTR;
                }
        } else {
+               if (!speed_hz)
+                       speed_hz = 100000;
+
                ms = 8LL * 1000LL * xfer->len;
-               do_div(ms, xfer->speed_hz);
+               do_div(ms, speed_hz);
                ms += ms + 200; /* some tolerance */
 
                if (ms > UINT_MAX)
@@ -3378,8 +3382,9 @@ int spi_setup(struct spi_device *spi)
        if (status)
                return status;
 
-       if (!spi->max_speed_hz ||
-           spi->max_speed_hz > spi->controller->max_speed_hz)
+       if (spi->controller->max_speed_hz &&
+           (!spi->max_speed_hz ||
+            spi->max_speed_hz > spi->controller->max_speed_hz))
                spi->max_speed_hz = spi->controller->max_speed_hz;
 
        mutex_lock(&spi->controller->io_mutex);
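
Worked numbers for the timeout above, assuming the new 100 kHz fallback: a
4096-byte transfer needs 8 * 1000 * 4096 / 100000 = 327 ms on the wire, so
the budget handed to wait_for_completion_timeout() becomes 2 * 327 + 200 =
854 ms. As a self-contained helper (the name is illustrative):

    static unsigned long long example_xfer_budget_ms(unsigned int len,
                                                     u32 speed_hz)
    {
            unsigned long long ms = 8LL * 1000LL * len;

            do_div(ms, speed_hz);   /* raw transfer time in ms */
            return 2 * ms + 200;    /* doubled, plus 200 ms of slack */
    }
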
index d99231c737fbfb52b03265b6e8937d11743907b3..80d74cce2a010502e2539fa49b75b9bc5caf6ab7 100644 (file)
@@ -2987,7 +2987,9 @@ static int put_compat_cmd(struct comedi32_cmd_struct __user *cmd32,
        v32.chanlist_len = cmd->chanlist_len;
        v32.data = ptr_to_compat(cmd->data);
        v32.data_len = cmd->data_len;
-       return copy_to_user(cmd32, &v32, sizeof(v32));
+       if (copy_to_user(cmd32, &v32, sizeof(v32)))
+               return -EFAULT;
+       return 0;
 }
 
 /* Handle 32-bit COMEDI_CMD ioctl. */
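
The fix above matters because copy_to_user() returns the number of bytes it
failed to copy, never a negative errno, so passing its result up confuses
callers that expect 0 or -EFAULT. The idiomatic wrapper:

    static int example_put_user_struct(void __user *dst, const void *src,
                                       size_t n)
    {
            /* non-zero return = bytes left uncopied: map it to -EFAULT */
            return copy_to_user(dst, src, n) ? -EFAULT : 0;
    }
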
index 861aedd5de4847c7208398b684bd343b651beeb4..0d42bc65f39bc33825118f00a8ee7ef5fd529cff 100644 (file)
@@ -278,21 +278,24 @@ static int spmi_controller_probe(struct platform_device *pdev)
        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!iores) {
                dev_err(&pdev->dev, "can not get resource!\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_put_controller;
        }
 
        spmi_controller->base = devm_ioremap(&pdev->dev, iores->start,
                                             resource_size(iores));
        if (!spmi_controller->base) {
                dev_err(&pdev->dev, "can not remap base addr!\n");
-               return -EADDRNOTAVAIL;
+               ret = -EADDRNOTAVAIL;
+               goto err_put_controller;
        }
 
        ret = of_property_read_u32(pdev->dev.of_node, "spmi-channel",
                                   &spmi_controller->channel);
        if (ret) {
                dev_err(&pdev->dev, "can not get channel\n");
-               return -ENODEV;
+               ret = -ENODEV;
+               goto err_put_controller;
        }
 
        platform_set_drvdata(pdev, spmi_controller);
@@ -309,9 +312,15 @@ static int spmi_controller_probe(struct platform_device *pdev)
        ctrl->write_cmd = spmi_write_cmd;
 
        ret = spmi_controller_add(ctrl);
-       if (ret)
-               dev_err(&pdev->dev, "spmi_add_controller failed with error %d!\n", ret);
+       if (ret) {
+               dev_err(&pdev->dev, "spmi_controller_add failed with error %d!\n", ret);
+               goto err_put_controller;
+       }
+
+       return 0;
 
+err_put_controller:
+       spmi_controller_put(ctrl);
        return ret;
 }
 
@@ -320,7 +329,7 @@ static int spmi_del_controller(struct platform_device *pdev)
        struct spmi_controller *ctrl = platform_get_drvdata(pdev);
 
        spmi_controller_remove(ctrl);
-       kfree(ctrl);
+       spmi_controller_put(ctrl);
        return 0;
 }
 
index 52b9fb18c87f00ef54f76d13801c5e9401fffb28..b666cb23e5ca1e17672cba2c8307276970b4aa6d 100644 (file)
@@ -1062,26 +1062,6 @@ static const struct v4l2_ctrl_config ctrl_select_isp_version = {
        .def = 0,
 };
 
-#if 0 /* #ifdef CONFIG_ION */
-/*
- * Control for ISP ion device fd
- *
- * userspace will open ion device and pass the fd to kernel.
- * this fd will be used to map shared fd to buffer.
- */
-/* V4L2_CID_ATOMISP_ION_DEVICE_FD is not defined */
-static const struct v4l2_ctrl_config ctrl_ion_dev_fd = {
-       .ops = &ctrl_ops,
-       .id = V4L2_CID_ATOMISP_ION_DEVICE_FD,
-       .type = V4L2_CTRL_TYPE_INTEGER,
-       .name = "Ion Device Fd",
-       .min = -1,
-       .max = 1024,
-       .step = 1,
-       .def = ION_FD_UNSET
-};
-#endif
-
 static void atomisp_init_subdev_pipe(struct atomisp_sub_device *asd,
                                     struct atomisp_video_pipe *pipe, enum v4l2_buf_type buf_type)
 {
index d241349214e71953f0504f66c60c3951bc250615..bc4bb43743131d01c8b2a95d3e3dec096a06bb2c 100644 (file)
@@ -712,7 +712,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
        ret = dma_async_device_register(dd);
        if (ret) {
                dev_err(&pdev->dev, "failed to register dma device\n");
-               return ret;
+               goto err_uninit_hsdma;
        }
 
        ret = of_dma_controller_register(pdev->dev.of_node,
@@ -728,6 +728,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
 
 err_unregister:
        dma_async_device_unregister(dd);
+err_uninit_hsdma:
+       mtk_hsdma_uninit(hsdma);
        return ret;
 }
 
index 44e15d7fb2f09ea4f90a4fb94bf68ae9e5bf8a89..66d6f1d06f219d716e3ce7895b49984b8cbad432 100644 (file)
@@ -46,60 +46,83 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
        return 0;
 }
 
-struct xcopy_dev_search_info {
-       const unsigned char *dev_wwn;
-       struct se_device *found_dev;
-};
-
+/**
+ * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers
+ *
+ * @se_dev: device being considered for match
+ * @dev_wwn: XCOPY requested NAA dev_wwn
+ * @return: 1 on match, 0 on no-match
+ */
 static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
-                                             void *data)
+                                             const unsigned char *dev_wwn)
 {
-       struct xcopy_dev_search_info *info = data;
        unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
        int rc;
 
-       if (!se_dev->dev_attrib.emulate_3pc)
+       if (!se_dev->dev_attrib.emulate_3pc) {
+               pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev);
                return 0;
+       }
 
        memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
        target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
 
-       rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
-       if (rc != 0)
-               return 0;
-
-       info->found_dev = se_dev;
-       pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
-
-       rc = target_depend_item(&se_dev->dev_group.cg_item);
+       rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
        if (rc != 0) {
-               pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n",
-                      rc, se_dev);
-               return rc;
+               pr_debug("XCOPY: skip non-matching: %*ph\n",
+                        XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn);
+               return 0;
        }
+       pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
 
-       pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n",
-                se_dev, &se_dev->dev_group);
        return 1;
 }
 
-static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
-                                       struct se_device **found_dev)
+static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
+                                       const unsigned char *dev_wwn,
+                                       struct se_device **_found_dev,
+                                       struct percpu_ref **_found_lun_ref)
 {
-       struct xcopy_dev_search_info info;
-       int ret;
-
-       memset(&info, 0, sizeof(info));
-       info.dev_wwn = dev_wwn;
-
-       ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info);
-       if (ret == 1) {
-               *found_dev = info.found_dev;
-               return 0;
-       } else {
-               pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
-               return -EINVAL;
+       struct se_dev_entry *deve;
+       struct se_node_acl *nacl;
+       struct se_lun *this_lun = NULL;
+       struct se_device *found_dev = NULL;
+
+       /* cmd with NULL sess indicates no associated $FABRIC_MOD */
+       if (!sess)
+               goto err_out;
+
+       pr_debug("XCOPY 0xe4: searching for: %*ph\n",
+                XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn);
+
+       nacl = sess->se_node_acl;
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+               struct se_device *this_dev;
+               int rc;
+
+               this_lun = rcu_dereference(deve->se_lun);
+               this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
+
+               rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
+               if (rc) {
+                       if (percpu_ref_tryget_live(&this_lun->lun_ref))
+                               found_dev = this_dev;
+                       break;
+               }
        }
+       rcu_read_unlock();
+       if (found_dev == NULL)
+               goto err_out;
+
+       pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n",
+                found_dev, &found_dev->dev_group);
+       *_found_dev = found_dev;
+       *_found_lun_ref = &this_lun->lun_ref;
+       return 0;
+err_out:
+       pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+       return -EINVAL;
 }
 
 static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
@@ -246,12 +269,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
 
        switch (xop->op_origin) {
        case XCOL_SOURCE_RECV_OP:
-               rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
-                                               &xop->dst_dev);
+               rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
+                                               xop->dst_tid_wwn,
+                                               &xop->dst_dev,
+                                               &xop->remote_lun_ref);
                break;
        case XCOL_DEST_RECV_OP:
-               rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
-                                               &xop->src_dev);
+               rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
+                                               xop->src_tid_wwn,
+                                               &xop->src_dev,
+                                               &xop->remote_lun_ref);
                break;
        default:
                pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
@@ -391,18 +418,12 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
 
 static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
 {
-       struct se_device *remote_dev;
-
        if (xop->op_origin == XCOL_SOURCE_RECV_OP)
-               remote_dev = xop->dst_dev;
+               pr_debug("putting dst lun_ref for %p\n", xop->dst_dev);
        else
-               remote_dev = xop->src_dev;
-
-       pr_debug("Calling configfs_undepend_item for"
-                 " remote_dev: %p remote_dev->dev_group: %p\n",
-                 remote_dev, &remote_dev->dev_group.cg_item);
+               pr_debug("putting src lun_ref for %p\n", xop->src_dev);
 
-       target_undepend_item(&remote_dev->dev_group.cg_item);
+       percpu_ref_put(xop->remote_lun_ref);
 }
 
 static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
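
A hedged sketch of the lookup discipline the rewrite adopts: objects found
under rcu_read_lock() may be freed concurrently, so they are only used after
percpu_ref_tryget_live() succeeds, and the matching percpu_ref_put() happens
at I/O completion. struct example_obj and the slot are illustrative:

    struct example_obj {
            struct percpu_ref ref;
    };

    static struct example_obj *example_lookup(struct example_obj __rcu **slot)
    {
            struct example_obj *obj;

            rcu_read_lock();
            obj = rcu_dereference(*slot);
            /* a dying/dead ref means "not found"; never use obj without it */
            if (obj && !percpu_ref_tryget_live(&obj->ref))
                    obj = NULL;
            rcu_read_unlock();

            return obj;     /* caller puts &obj->ref when the I/O is done */
    }
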
index c56a1bde9417bc34ae1a6432d35f2acb2ca93067..e5f20005179a86d1414ddbd87939f142b959400c 100644 (file)
@@ -27,6 +27,7 @@ struct xcopy_op {
        struct se_device *dst_dev;
        unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
        unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+       struct percpu_ref *remote_lun_ref;
 
        sector_t src_lba;
        sector_t dst_lba;
index 47a6e42f0d04f1a4c8883511060add1ae7456d65..e15cd6b5bb99a896b21d98506a900d1ab6686cb6 100644 (file)
@@ -401,6 +401,20 @@ config MIPS_EJTAG_FDC_KGDB_CHAN
        help
          FDC channel number to use for KGDB.
 
+config NULL_TTY
+       tristate "NULL TTY driver"
+       help
+         Say Y here if you want a NULL TTY which simply discards messages.
+
+         This is useful to allow userspace applications which expect a console
+         device to work without modifications even when no console is
+         available or desired.
+
+         In order to use this driver, you should redirect the console to this
+         TTY, or boot the kernel with console=ttynull.
+
+         If unsure, say N.
+
 config TRACE_ROUTER
        tristate "Trace data router for MIPI P1149.7 cJTAG standard"
        depends on TRACE_SINK
index 3c1c5a9240a70279c9904445417e83e36b7f107c..b3ccae9326601cdbf13604f105bc08e2a1c27e35 100644 (file)
@@ -2,7 +2,7 @@
 obj-$(CONFIG_TTY)              += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
                                   tty_buffer.o tty_port.o tty_mutex.o \
                                   tty_ldsem.o tty_baudrate.o tty_jobctrl.o \
-                                  n_null.o ttynull.o
+                                  n_null.o
 obj-$(CONFIG_LEGACY_PTYS)      += pty.o
 obj-$(CONFIG_UNIX98_PTYS)      += pty.o
 obj-$(CONFIG_AUDIT)            += tty_audit.o
@@ -25,6 +25,7 @@ obj-$(CONFIG_ISI)             += isicom.o
 obj-$(CONFIG_MOXA_INTELLIO)    += moxa.o
 obj-$(CONFIG_MOXA_SMARTIO)     += mxser.o
 obj-$(CONFIG_NOZOMI)           += nozomi.o
+obj-$(CONFIG_NULL_TTY)         += ttynull.o
 obj-$(CONFIG_ROCKETPORT)       += rocket.o
 obj-$(CONFIG_SYNCLINK_GT)      += synclink_gt.o
 obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o
index 1066eebe3b28b046deb69348b1357c77eed90185..328d5a78792fe78c19400fb78373fcee440c3a99 100644 (file)
@@ -1000,6 +1000,7 @@ static int sifive_serial_probe(struct platform_device *pdev)
        /* Set up clock divider */
        ssp->clkin_rate = clk_get_rate(ssp->clk);
        ssp->baud_rate = SIFIVE_DEFAULT_BAUD_RATE;
+       ssp->port.uartclk = ssp->baud_rate * 16;
        __ssp_update_div(ssp);
 
        platform_set_drvdata(pdev, ssp);
index eced70ec54e174e0ae6d48ba90777ebcf08f5e4e..17f05b7eb6d3ede3790d98d9527c7221eb907405 100644 (file)
@@ -2,13 +2,6 @@
 /*
  * Copyright (C) 2019 Axis Communications AB
  *
- * The console is useful for userspace applications which expect a console
- * device to work without modifications even when no console is available
- * or desired.
- *
- * In order to use this driver, you should redirect the console to this
- * TTY, or boot the kernel with console=ttynull.
- *
  * Based on ttyprintk.c:
  *  Copyright (C) 2010 Samo Pogacnik
  */
@@ -66,17 +59,6 @@ static struct console ttynull_console = {
        .device = ttynull_device,
 };
 
-void __init register_ttynull_console(void)
-{
-       if (!ttynull_driver)
-               return;
-
-       if (add_preferred_console(ttynull_console.name, 0, NULL))
-               return;
-
-       register_console(&ttynull_console);
-}
-
 static int __init ttynull_init(void)
 {
        struct tty_driver *driver;
index 9e12152ea46bcc95fe26564a6d22937c0a8a9fef..8b7bc10b6e8b44a6169d0027b9eb3693b862835b 100644 (file)
@@ -139,9 +139,13 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
        misc_pdev = of_find_device_by_node(args.np);
        of_node_put(args.np);
 
-       if (!misc_pdev || !platform_get_drvdata(misc_pdev))
+       if (!misc_pdev)
                return ERR_PTR(-EPROBE_DEFER);
 
+       if (!platform_get_drvdata(misc_pdev)) {
+               put_device(&misc_pdev->dev);
+               return ERR_PTR(-EPROBE_DEFER);
+       }
        data->dev = &misc_pdev->dev;
 
        /*
index f52f1bc0559f9d306b1c3b72b4cc86327e7e583c..781905745812eaab77053a423fd4181129807a46 100644 (file)
@@ -1895,6 +1895,10 @@ static const struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0x04d8, 0xfd08),
        .driver_info = IGNORE_DEVICE,
        },
+
+       { USB_DEVICE(0x04d8, 0xf58b),
+       .driver_info = IGNORE_DEVICE,
+       },
 #endif
 
        /*Samsung phone in firmware update mode */
index 02d0cfd23bb2975223029324ec42232e00d0fbff..508b1c3f8b731bae17acb90b39a9ab64f683adb0 100644 (file)
@@ -465,13 +465,23 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
        if (!desc->resp_count || !--desc->resp_count)
                goto out;
 
+       if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
+               rv = -ENODEV;
+               goto out;
+       }
+       if (test_bit(WDM_RESETTING, &desc->flags)) {
+               rv = -EIO;
+               goto out;
+       }
+
        set_bit(WDM_RESPONDING, &desc->flags);
        spin_unlock_irq(&desc->iuspin);
        rv = usb_submit_urb(desc->response, GFP_KERNEL);
        spin_lock_irq(&desc->iuspin);
        if (rv) {
-               dev_err(&desc->intf->dev,
-                       "usb_submit_urb failed with result %d\n", rv);
+               if (!test_bit(WDM_DISCONNECTING, &desc->flags))
+                       dev_err(&desc->intf->dev,
+                               "usb_submit_urb failed with result %d\n", rv);
 
                /* make sure the next notification trigger a submit */
                clear_bit(WDM_RESPONDING, &desc->flags);
@@ -1027,9 +1037,9 @@ static void wdm_disconnect(struct usb_interface *intf)
        wake_up_all(&desc->wait);
        mutex_lock(&desc->rlock);
        mutex_lock(&desc->wlock);
-       kill_urbs(desc);
        cancel_work_sync(&desc->rxwork);
        cancel_work_sync(&desc->service_outs_intr);
+       kill_urbs(desc);
        mutex_unlock(&desc->wlock);
        mutex_unlock(&desc->rlock);
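The reordering above matters because both work items may submit URBs: killing the URBs first would let a still-running work item resubmit one immediately afterwards. Annotated, the teardown now reads (a sketch of the same lines with the reasoning spelled out):

	cancel_work_sync(&desc->rxwork);		/* may call usb_submit_urb() */
	cancel_work_sync(&desc->service_outs_intr);	/* likewise */
	kill_urbs(desc);				/* nothing can resubmit now */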
 
index 67cbd42421bee7e5776659a5f7ebfb077564a457..134dc2005ce97de81ee1787510d4772395da8ef4 100644 (file)
@@ -274,8 +274,25 @@ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i
 #define usblp_reset(usblp)\
        usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0)
 
-#define usblp_hp_channel_change_request(usblp, channel, buffer) \
-       usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, channel, buffer, 1)
+static int usblp_hp_channel_change_request(struct usblp *usblp, int channel, u8 *new_channel)
+{
+       u8 *buf;
+       int ret;
+
+       buf = kzalloc(1, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       ret = usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST,
+                       USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE,
+                       channel, buf, 1);
+       if (ret == 0)
+               *new_channel = buf[0];
+
+       kfree(buf);
+
+       return ret;
+}
 
 /*
  * See the description for usblp_select_alts() below for the usage
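The macro above is converted to a real function because the one-byte result buffer previously lived on the caller's stack; buffers handed to USB control transfers are used for DMA and must be heap-allocated. A minimal stand-alone sketch of the same pattern, with illustrative names (read_byte_ctrl and its request argument are not part of usblp):

static int read_byte_ctrl(struct usb_device *udev, u8 request, u8 *out)
{
	u8 *buf = kmalloc(1, GFP_KERNEL);	/* DMA-safe, never on the stack */
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_INTERFACE,
			      0, 0, buf, 1, USB_CTRL_GET_TIMEOUT);
	if (ret == 1)
		*out = buf[0];
	kfree(buf);

	return ret == 1 ? 0 : (ret < 0 ? ret : -EIO);
}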
index 60886a7464c3a95b029163c630f8914f97c66b03..ad5a0f405a75cd9c9a1cbb81ff071ec04281eef5 100644 (file)
@@ -1649,14 +1649,12 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
        urb->status = status;
        /*
         * This function can be called in task context inside another remote
-        * coverage collection section, but KCOV doesn't support that kind of
+        * coverage collection section, but kcov doesn't support that kind of
         * recursion yet. Only collect coverage in softirq context for now.
         */
-       if (in_serving_softirq())
-               kcov_remote_start_usb((u64)urb->dev->bus->busnum);
+       kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
        urb->complete(urb);
-       if (in_serving_softirq())
-               kcov_remote_stop();
+       kcov_remote_stop_softirq();
 
        usb_anchor_resume_wakeups(anchor);
        atomic_dec(&urb->use_count);
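The new helpers fold the in_serving_softirq() checks that were open-coded above into kcov itself; plausibly they reduce to something like the sketch below (see include/linux/kcov.h for the authoritative definitions):

static inline void kcov_remote_start_usb_softirq(u64 id)
{
	if (in_serving_softirq())
		kcov_remote_start_usb(id);
}

static inline void kcov_remote_stop_softirq(void)
{
	if (in_serving_softirq())
		kcov_remote_stop();
}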
index 2f95f08ca51190e89c14719ca880e726bef7c12b..1b241f937d8f42b838d1a5f173b21224e33a44b2 100644 (file)
 
 /* Global USB2 PHY Vendor Control Register */
 #define DWC3_GUSB2PHYACC_NEWREGREQ     BIT(25)
+#define DWC3_GUSB2PHYACC_DONE          BIT(24)
 #define DWC3_GUSB2PHYACC_BUSY          BIT(23)
 #define DWC3_GUSB2PHYACC_WRITE         BIT(22)
 #define DWC3_GUSB2PHYACC_ADDR(n)       (n << 16)
index 417e05381b5d0fc98d452f3b0822e957b2f441b8..bdf1f98dfad8c861aeb8ad7b3ebf81bb1d34676a 100644 (file)
@@ -754,7 +754,7 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
 
        ret = priv->drvdata->setup_regmaps(priv, base);
        if (ret)
-               return ret;
+               goto err_disable_clks;
 
        if (priv->vbus) {
                ret = regulator_enable(priv->vbus);
index 78cb4db8a6e45d483763f76faa875c6ac50ed5f5..ee44321fee38611910756410e936b92fdd134a98 100644 (file)
@@ -1763,6 +1763,8 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
                        list_for_each_entry_safe(r, t, &dep->started_list, list)
                                dwc3_gadget_move_cancelled_request(r);
 
+                       dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
+
                        goto out;
                }
        }
@@ -2083,6 +2085,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
 
 static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
 static void __dwc3_gadget_stop(struct dwc3 *dwc);
+static int __dwc3_gadget_start(struct dwc3 *dwc);
 
 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 {
@@ -2145,6 +2148,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
                        dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
                                                dwc->ev_buf->length;
                }
+       } else {
+               __dwc3_gadget_start(dwc);
        }
 
        ret = dwc3_gadget_run_stop(dwc, is_on, false);
@@ -2319,10 +2324,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
        }
 
        dwc->gadget_driver      = driver;
-
-       if (pm_runtime_active(dwc->dev))
-               __dwc3_gadget_start(dwc);
-
        spin_unlock_irqrestore(&dwc->lock, flags);
 
        return 0;
@@ -2348,13 +2349,6 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
        unsigned long           flags;
 
        spin_lock_irqsave(&dwc->lock, flags);
-
-       if (pm_runtime_suspended(dwc->dev))
-               goto out;
-
-       __dwc3_gadget_stop(dwc);
-
-out:
        dwc->gadget_driver      = NULL;
        spin_unlock_irqrestore(&dwc->lock, flags);
 
index aa213c9815f67bccb8026d85754b3e59f744d8a3..f23f4c9a557e9ab7cab5afeeaf27c1051948659e 100644 (file)
@@ -7,6 +7,8 @@
  * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
  */
 
+#include <linux/delay.h>
+#include <linux/time64.h>
 #include <linux/ulpi/regs.h>
 
 #include "core.h"
                DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \
                DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a))
 
-static int dwc3_ulpi_busyloop(struct dwc3 *dwc)
+#define DWC3_ULPI_BASE_DELAY   DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)
+
+static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read)
 {
-       unsigned int count = 1000;
+       unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY;
+       unsigned int count = 10000;
        u32 reg;
 
+       if (addr >= ULPI_EXT_VENDOR_SPECIFIC)
+               ns += DWC3_ULPI_BASE_DELAY;
+
+       if (read)
+               ns += DWC3_ULPI_BASE_DELAY;
+
+       reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+       if (reg & DWC3_GUSB2PHYCFG_SUSPHY)
+               usleep_range(1000, 1200);
+
        while (count--) {
+               ndelay(ns);
                reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
-               if (!(reg & DWC3_GUSB2PHYACC_BUSY))
+               if (reg & DWC3_GUSB2PHYACC_DONE)
                        return 0;
                cpu_relax();
        }
@@ -38,16 +54,10 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr)
        u32 reg;
        int ret;
 
-       reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
-       if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
-               reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
-               dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
-       }
-
        reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
        dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
 
-       ret = dwc3_ulpi_busyloop(dwc);
+       ret = dwc3_ulpi_busyloop(dwc, addr, true);
        if (ret)
                return ret;
 
@@ -61,17 +71,11 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val)
        struct dwc3 *dwc = dev_get_drvdata(dev);
        u32 reg;
 
-       reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
-       if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
-               reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
-               dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
-       }
-
        reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
        reg |= DWC3_GUSB2PHYACC_WRITE | val;
        dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
 
-       return dwc3_ulpi_busyloop(dwc);
+       return dwc3_ulpi_busyloop(dwc, addr, false);
 }
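The delay constant is one ULPI clock period: DIV_ROUND_UP(NSEC_PER_SEC, 60000000) is about 17 ns at the fixed 60 MHz ULPI rate. An access occupies roughly five cycles on the wire, plus one more for a read and one more again for an extended-address register, so each poll now sleeps about one access time instead of hammering the register. Back-of-the-envelope, for a read of a normal register (a sketch, not driver code):

	unsigned long base = DIV_ROUND_UP(NSEC_PER_SEC, 60000000L);	/* ~17 ns */
	unsigned long ns   = 5 * base + base;	/* ~100 ns: 5 base cycles + 1 for the read */
	/* with count = 10000 the loop still gives up after roughly 1 ms */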
 
 static const struct ulpi_ops dwc3_ulpi_ops = {
index 7e47e6223089cb42c2df61f00a17f0205ffecf24..2d152571a7de81925ffa4d0cfef78227ad0ce17a 100644 (file)
@@ -265,6 +265,7 @@ config USB_CONFIGFS_NCM
        depends on NET
        select USB_U_ETHER
        select USB_F_NCM
+       select CRC32
        help
          NCM is an advanced protocol for Ethernet encapsulation that allows
          grouping several Ethernet frames into one USB transfer and
@@ -314,6 +315,7 @@ config USB_CONFIGFS_EEM
        depends on NET
        select USB_U_ETHER
        select USB_F_EEM
+       select CRC32
        help
          CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
          and therefore can be supported by more hardware.  Technically ECM and
index c6d455f2bb928b22cbedc5f0901221fdca997429..1a556a628971f6ff82bfcea31a9f6954fcdea06d 100644 (file)
@@ -392,8 +392,11 @@ int usb_function_deactivate(struct usb_function *function)
 
        spin_lock_irqsave(&cdev->lock, flags);
 
-       if (cdev->deactivations == 0)
+       if (cdev->deactivations == 0) {
+               spin_unlock_irqrestore(&cdev->lock, flags);
                status = usb_gadget_deactivate(cdev->gadget);
+               spin_lock_irqsave(&cdev->lock, flags);
+       }
        if (status == 0)
                cdev->deactivations++;
 
@@ -424,8 +427,11 @@ int usb_function_activate(struct usb_function *function)
                status = -EINVAL;
        else {
                cdev->deactivations--;
-               if (cdev->deactivations == 0)
+               if (cdev->deactivations == 0) {
+                       spin_unlock_irqrestore(&cdev->lock, flags);
                        status = usb_gadget_activate(cdev->gadget);
+                       spin_lock_irqsave(&cdev->lock, flags);
+               }
        }
 
        spin_unlock_irqrestore(&cdev->lock, flags);
index 56051bb9734983d643ff809007b7c8c5862ae758..36ffb43f9c1a0541f8c4d84d7c02502567ac9ce2 100644 (file)
@@ -221,9 +221,16 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item,
 
 static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page)
 {
-       char *udc_name = to_gadget_info(item)->composite.gadget_driver.udc_name;
+       struct gadget_info *gi = to_gadget_info(item);
+       char *udc_name;
+       int ret;
+
+       mutex_lock(&gi->lock);
+       udc_name = gi->composite.gadget_driver.udc_name;
+       ret = sprintf(page, "%s\n", udc_name ?: "");
+       mutex_unlock(&gi->lock);
 
-       return sprintf(page, "%s\n", udc_name ?: "");
+       return ret;
 }
 
 static int unregister_gadget(struct gadget_info *gi)
@@ -1248,9 +1255,9 @@ static void purge_configs_funcs(struct gadget_info *gi)
 
                cfg = container_of(c, struct config_usb_cfg, c);
 
-               list_for_each_entry_safe(f, tmp, &c->functions, list) {
+               list_for_each_entry_safe_reverse(f, tmp, &c->functions, list) {
 
-                       list_move_tail(&f->list, &cfg->func_list);
+                       list_move(&f->list, &cfg->func_list);
                        if (f->unbind) {
                                dev_dbg(&gi->cdev.gadget->dev,
                                        "unbind function '%s'/%p\n",
@@ -1536,7 +1543,7 @@ static const struct usb_gadget_driver configfs_driver_template = {
        .suspend        = configfs_composite_suspend,
        .resume         = configfs_composite_resume,
 
-       .max_speed      = USB_SPEED_SUPER,
+       .max_speed      = USB_SPEED_SUPER_PLUS,
        .driver = {
                .owner          = THIS_MODULE,
                .name           = "configfs-gadget",
@@ -1576,7 +1583,7 @@ static struct config_group *gadgets_make(
        gi->composite.unbind = configfs_do_nothing;
        gi->composite.suspend = NULL;
        gi->composite.resume = NULL;
-       gi->composite.max_speed = USB_SPEED_SUPER;
+       gi->composite.max_speed = USB_SPEED_SUPER_PLUS;
 
        spin_lock_init(&gi->spinlock);
        mutex_init(&gi->lock);
index 64a4112068fc8b05bf5b219d5bb1ce497d7a90fe..2f1eb2e81d306bdf34bf0cc0c78fad477738c6e7 100644 (file)
@@ -1162,6 +1162,7 @@ fail_tx_reqs:
                printer_req_free(dev->in_ep, req);
        }
 
+       usb_free_all_descriptors(f);
        return ret;
 
 }
index 3633df6d7610f04e1d6b1142e9d8c2223900d806..5d960b6603b6f0f67e2590bb50aa0809edcd4481 100644 (file)
@@ -271,7 +271,7 @@ static struct usb_endpoint_descriptor fs_epout_desc = {
 
        .bEndpointAddress = USB_DIR_OUT,
        .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
-       .wMaxPacketSize = cpu_to_le16(1023),
+       /* .wMaxPacketSize = DYNAMIC */
        .bInterval = 1,
 };
 
@@ -280,7 +280,7 @@ static struct usb_endpoint_descriptor hs_epout_desc = {
        .bDescriptorType = USB_DT_ENDPOINT,
 
        .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
-       .wMaxPacketSize = cpu_to_le16(1024),
+       /* .wMaxPacketSize = DYNAMIC */
        .bInterval = 4,
 };
 
@@ -348,7 +348,7 @@ static struct usb_endpoint_descriptor fs_epin_desc = {
 
        .bEndpointAddress = USB_DIR_IN,
        .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
-       .wMaxPacketSize = cpu_to_le16(1023),
+       /* .wMaxPacketSize = DYNAMIC */
        .bInterval = 1,
 };
 
@@ -357,7 +357,7 @@ static struct usb_endpoint_descriptor hs_epin_desc = {
        .bDescriptorType = USB_DT_ENDPOINT,
 
        .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
-       .wMaxPacketSize = cpu_to_le16(1024),
+       /* .wMaxPacketSize = DYNAMIC */
        .bInterval = 4,
 };
 
@@ -444,12 +444,28 @@ struct cntrl_range_lay3 {
        __le32  dRES;
 } __packed;
 
-static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
+static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
        struct usb_endpoint_descriptor *ep_desc,
-       unsigned int factor, bool is_playback)
+       enum usb_device_speed speed, bool is_playback)
 {
        int chmask, srate, ssize;
-       u16 max_packet_size;
+       u16 max_size_bw, max_size_ep;
+       unsigned int factor;
+
+       switch (speed) {
+       case USB_SPEED_FULL:
+               max_size_ep = 1023;
+               factor = 1000;
+               break;
+
+       case USB_SPEED_HIGH:
+               max_size_ep = 1024;
+               factor = 8000;
+               break;
+
+       default:
+               return -EINVAL;
+       }
 
        if (is_playback) {
                chmask = uac2_opts->p_chmask;
@@ -461,10 +477,12 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
                ssize = uac2_opts->c_ssize;
        }
 
-       max_packet_size = num_channels(chmask) * ssize *
+       max_size_bw = num_channels(chmask) * ssize *
                DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
-       ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_packet_size,
-                               le16_to_cpu(ep_desc->wMaxPacketSize)));
+       ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
+                                                   max_size_ep));
+
+       return 0;
 }
 
 /* Use macro to overcome line length limitation */
@@ -670,10 +688,33 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
        }
 
        /* Calculate wMaxPacketSize according to audio bandwidth */
-       set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
-       set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false);
-       set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
-       set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
+       ret = set_ep_max_packet_size(uac2_opts, &fs_epin_desc, USB_SPEED_FULL,
+                                    true);
+       if (ret < 0) {
+               dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+               return ret;
+       }
+
+       ret = set_ep_max_packet_size(uac2_opts, &fs_epout_desc, USB_SPEED_FULL,
+                                    false);
+       if (ret < 0) {
+               dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+               return ret;
+       }
+
+       ret = set_ep_max_packet_size(uac2_opts, &hs_epin_desc, USB_SPEED_HIGH,
+                                    true);
+       if (ret < 0) {
+               dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+               return ret;
+       }
+
+       ret = set_ep_max_packet_size(uac2_opts, &hs_epout_desc, USB_SPEED_HIGH,
+                                    false);
+       if (ret < 0) {
+               dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+               return ret;
+       }
 
        if (EPOUT_EN(uac2_opts)) {
                agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
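Worked through for a common configuration: at high speed the factor is 8000 microframes per second, and bInterval = 4 gives an interval of 1 << (4 - 1) = 8 microframes, i.e. 1000 packets per second; 48 kHz stereo 16-bit audio then needs 2 * 2 * DIV_ROUND_UP(48000, 1000) = 192 bytes per packet, well under the 1024-byte high-speed isochronous cap. As a sketch with made-up sample values (not part of f_uac2):

	unsigned int pps   = 8000 / (1 << (4 - 1));		/* 1000 packets/s */
	u16 max_size_bw    = 2 * 2 * DIV_ROUND_UP(48000, pps);	/* 192 bytes */
	u16 wMaxPacketSize = min_t(u16, max_size_bw, 1024);	/* HS isoc cap -> 192 */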
index 31ea76adcc0db3a48b88fb044b1dbd4028233360..c019f2b0c0af3d680fbae7aaf35001c4476c1f0f 100644 (file)
 #define UETH__VERSION  "29-May-2008"
 
 /* Experiments show that both Linux and Windows hosts allow up to 16k
- * frame sizes. Set the max size to 15k+52 to prevent allocating 32k
+ * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
  * blocks and still have efficient handling. */
-#define GETHER_MAX_ETH_FRAME_LEN 15412
+#define GETHER_MAX_MTU_SIZE 15412
+#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
 
 struct eth_dev {
        /* lock is held while accessing port_usb
@@ -786,7 +787,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
 
        /* MTU range: 14 - 15412 */
        net->min_mtu = ETH_HLEN;
-       net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+       net->max_mtu = GETHER_MAX_MTU_SIZE;
 
        dev->gadget = g;
        SET_NETDEV_DEV(net, &g->dev);
@@ -848,7 +849,7 @@ struct net_device *gether_setup_name_default(const char *netname)
 
        /* MTU range: 14 - 15412 */
        net->min_mtu = ETH_HLEN;
-       net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+       net->max_mtu = GETHER_MAX_MTU_SIZE;
 
        return net;
 }
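The split keeps the arithmetic honest: an MTU excludes the 14-byte Ethernet header, so the frame ceiling becomes 15412 + ETH_HLEN = 15426, which still fits a 16 KiB allocation (15412 is 15 * 1024 + 52). Setting max_mtu to the frame length, as before, allowed a maximal-MTU packet plus header to outgrow the buffer budget. A compile-time sanity sketch (SZ_16K from linux/sizes.h):

	BUILD_BUG_ON(GETHER_MAX_MTU_SIZE + ETH_HLEN > SZ_16K);	/* 15426 <= 16384 */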
index 59be2d8417c9cef33edebf64aa9fd86421a32709..e8033e5f0c18e4cd4197fa8a5e8d947134bce07d 100644 (file)
@@ -200,8 +200,10 @@ static int acm_ms_bind(struct usb_composite_dev *cdev)
                struct usb_descriptor_header *usb_desc;
 
                usb_desc = usb_otg_descriptor_alloc(gadget);
-               if (!usb_desc)
+               if (!usb_desc) {
+                       status = -ENOMEM;
                        goto fail_string_ids;
+               }
                usb_otg_descriptor_init(gadget, usb_desc);
                otg_desc[0] = usb_desc;
                otg_desc[1] = NULL;
index 1a12aab208b465d32cbf2b4d80de61c1a54cc14a..8c614bb86c665c773ac0edad7a07a4a8bd837436 100644 (file)
@@ -90,7 +90,7 @@ config USB_BCM63XX_UDC
 
 config USB_FSL_USB2
        tristate "Freescale Highspeed USB DR Peripheral Controller"
-       depends on FSL_SOC || ARCH_MXC
+       depends on FSL_SOC
        help
           Some Freescale PowerPC and i.MX processors have a High Speed
           Dual-Role (DR) USB controller, which supports device mode.
index f5a7ce28aecdfa23740b441ab4e1748f2094237a..a21f2224e7eb76902e4fdf2ec472f079cbdba0ff 100644 (file)
@@ -23,7 +23,6 @@ obj-$(CONFIG_USB_ATMEL_USBA)  += atmel_usba_udc.o
 obj-$(CONFIG_USB_BCM63XX_UDC)  += bcm63xx_udc.o
 obj-$(CONFIG_USB_FSL_USB2)     += fsl_usb2_udc.o
 fsl_usb2_udc-y                 := fsl_udc_core.o
-fsl_usb2_udc-$(CONFIG_ARCH_MXC)        += fsl_mxc_udc.o
 obj-$(CONFIG_USB_TEGRA_XUDC)   += tegra-xudc.o
 obj-$(CONFIG_USB_M66592)       += m66592-udc.o
 obj-$(CONFIG_USB_R8A66597)     += r8a66597-udc.o
index 5b5cfeb6c14a6f331e7d3c54ad5f8b680be2da0e..6a62bbd01324f5bb99998a489188a5949a72dd3e 100644 (file)
@@ -659,8 +659,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
  *
  * Enables the D+ (or potentially D-) pullup.  The host will start
  * enumerating this gadget when the pullup is active and a VBUS session
- * is active (the link is powered).  This pullup is always enabled unless
- * usb_gadget_disconnect() has been used to disable it.
+ * is active (the link is powered).
  *
  * Returns zero on success, else negative errno.
  */
index ab5e978b5052c01e348a15e46bdfff9d493145aa..1a953f44183aaf0cb99a89c8d18784eefead39e2 100644 (file)
@@ -2118,9 +2118,21 @@ static int dummy_hub_control(
                                dum_hcd->port_status &= ~USB_PORT_STAT_POWER;
                        set_link_state(dum_hcd);
                        break;
-               default:
+               case USB_PORT_FEAT_ENABLE:
+               case USB_PORT_FEAT_C_ENABLE:
+               case USB_PORT_FEAT_C_SUSPEND:
+                       /* Not allowed for USB-3 */
+                       if (hcd->speed == HCD_USB3)
+                               goto error;
+                       fallthrough;
+               case USB_PORT_FEAT_C_CONNECTION:
+               case USB_PORT_FEAT_C_RESET:
                        dum_hcd->port_status &= ~(1 << wValue);
                        set_link_state(dum_hcd);
+                       break;
+               default:
+                       /* Disallow INDICATOR and C_OVER_CURRENT */
+                       goto error;
                }
                break;
        case GetHubDescriptor:
@@ -2281,18 +2293,17 @@ static int dummy_hub_control(
                         */
                        dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
                        fallthrough;
+               case USB_PORT_FEAT_C_CONNECTION:
+               case USB_PORT_FEAT_C_RESET:
+               case USB_PORT_FEAT_C_ENABLE:
+               case USB_PORT_FEAT_C_SUSPEND:
+                       /* Not allowed for USB-3, and ignored for USB-2 */
+                       if (hcd->speed == HCD_USB3)
+                               goto error;
+                       break;
                default:
-                       if (hcd->speed == HCD_USB3) {
-                               if ((dum_hcd->port_status &
-                                    USB_SS_PORT_STAT_POWER) != 0) {
-                                       dum_hcd->port_status |= (1 << wValue);
-                               }
-                       } else
-                               if ((dum_hcd->port_status &
-                                    USB_PORT_STAT_POWER) != 0) {
-                                       dum_hcd->port_status |= (1 << wValue);
-                               }
-                       set_link_state(dum_hcd);
+                       /* Disallow TEST, INDICATOR, and C_OVER_CURRENT */
+                       goto error;
                }
                break;
        case GetPortErrorCount:
diff --git a/drivers/usb/gadget/udc/fsl_mxc_udc.c b/drivers/usb/gadget/udc/fsl_mxc_udc.c
deleted file mode 100644 (file)
index 5a32199..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2009
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Description:
- * Helper routines for i.MX3x SoCs from Freescale, needed by the fsl_usb2_udc.c
- * driver to function correctly on these systems.
- */
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/fsl_devices.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include "fsl_usb2_udc.h"
-
-static struct clk *mxc_ahb_clk;
-static struct clk *mxc_per_clk;
-static struct clk *mxc_ipg_clk;
-
-/* workaround ENGcm09152 for i.MX35 */
-#define MX35_USBPHYCTRL_OFFSET         0x600
-#define USBPHYCTRL_OTGBASE_OFFSET      0x8
-#define USBPHYCTRL_EVDO                        (1 << 23)
-
-int fsl_udc_clk_init(struct platform_device *pdev)
-{
-       struct fsl_usb2_platform_data *pdata;
-       unsigned long freq;
-       int ret;
-
-       pdata = dev_get_platdata(&pdev->dev);
-
-       mxc_ipg_clk = devm_clk_get(&pdev->dev, "ipg");
-       if (IS_ERR(mxc_ipg_clk)) {
-               dev_err(&pdev->dev, "clk_get(\"ipg\") failed\n");
-               return PTR_ERR(mxc_ipg_clk);
-       }
-
-       mxc_ahb_clk = devm_clk_get(&pdev->dev, "ahb");
-       if (IS_ERR(mxc_ahb_clk)) {
-               dev_err(&pdev->dev, "clk_get(\"ahb\") failed\n");
-               return PTR_ERR(mxc_ahb_clk);
-       }
-
-       mxc_per_clk = devm_clk_get(&pdev->dev, "per");
-       if (IS_ERR(mxc_per_clk)) {
-               dev_err(&pdev->dev, "clk_get(\"per\") failed\n");
-               return PTR_ERR(mxc_per_clk);
-       }
-
-       clk_prepare_enable(mxc_ipg_clk);
-       clk_prepare_enable(mxc_ahb_clk);
-       clk_prepare_enable(mxc_per_clk);
-
-       /* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */
-       if (!strcmp(pdev->id_entry->name, "imx-udc-mx27")) {
-               freq = clk_get_rate(mxc_per_clk);
-               if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
-                   (freq < 59999000 || freq > 60001000)) {
-                       dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq);
-                       ret = -EINVAL;
-                       goto eclkrate;
-               }
-       }
-
-       return 0;
-
-eclkrate:
-       clk_disable_unprepare(mxc_ipg_clk);
-       clk_disable_unprepare(mxc_ahb_clk);
-       clk_disable_unprepare(mxc_per_clk);
-       mxc_per_clk = NULL;
-       return ret;
-}
-
-int fsl_udc_clk_finalize(struct platform_device *pdev)
-{
-       struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
-       int ret = 0;
-
-       /* workaround ENGcm09152 for i.MX35 */
-       if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
-               unsigned int v;
-               struct resource *res = platform_get_resource
-                       (pdev, IORESOURCE_MEM, 0);
-               void __iomem *phy_regs = ioremap(res->start +
-                                               MX35_USBPHYCTRL_OFFSET, 512);
-               if (!phy_regs) {
-                       dev_err(&pdev->dev, "ioremap for phy address fails\n");
-                       ret = -EINVAL;
-                       goto ioremap_err;
-               }
-
-               v = readl(phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
-               writel(v | USBPHYCTRL_EVDO,
-                       phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
-
-               iounmap(phy_regs);
-       }
-
-
-ioremap_err:
-       /* ULPI transceivers don't need usbpll */
-       if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
-               clk_disable_unprepare(mxc_per_clk);
-               mxc_per_clk = NULL;
-       }
-
-       return ret;
-}
-
-void fsl_udc_clk_release(void)
-{
-       if (mxc_per_clk)
-               clk_disable_unprepare(mxc_per_clk);
-       clk_disable_unprepare(mxc_ahb_clk);
-       clk_disable_unprepare(mxc_ipg_clk);
-}
index 91ab81c3fc79aacbd44efdd234a3740682e41a7d..e86940571b4cf1ec7153ad15a0f22a2a2510544e 100644 (file)
@@ -4770,19 +4770,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
 {
        unsigned long long timeout_ns;
 
+       if (xhci->quirks & XHCI_INTEL_HOST)
+               timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
+       else
+               timeout_ns = udev->u1_params.sel;
+
        /* Prevent U1 if service interval is shorter than U1 exit latency */
        if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
-               if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
+               if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
                        dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
                        return USB3_LPM_DISABLED;
                }
        }
 
-       if (xhci->quirks & XHCI_INTEL_HOST)
-               timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
-       else
-               timeout_ns = udev->u1_params.sel;
-
        /* The U1 timeout is encoded in 1us intervals.
         * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
         */
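Computing the timeout first lets the service-interval guard compare against the value that will actually be programmed (the Intel quirk value or udev->u1_params.sel) instead of always against the exit latency in u1_params.mel; the U2 hunk below makes the same change. With made-up numbers, an isochronous endpoint with a 125 us ESIT and a 200 us computed U1 timeout is now refused:

	/* sketch: ESIT = 125 us, computed timeout = 200 us */
	if (125000ULL /* esit_ns */ <= 200000ULL /* timeout_ns */)
		return USB3_LPM_DISABLED;	/* link would never idle long enough to enter U1 */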
@@ -4834,19 +4834,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
 {
        unsigned long long timeout_ns;
 
+       if (xhci->quirks & XHCI_INTEL_HOST)
+               timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
+       else
+               timeout_ns = udev->u2_params.sel;
+
        /* Prevent U2 if service interval is shorter than U2 exit latency */
        if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
-               if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
+               if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
                        dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
                        return USB3_LPM_DISABLED;
                }
        }
 
-       if (xhci->quirks & XHCI_INTEL_HOST)
-               timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
-       else
-               timeout_ns = udev->u2_params.sel;
-
        /* The U2 timeout is encoded in 256us intervals */
        timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
        /* If the necessary timeout value is bigger than what we can set in the
index 73ebfa6e9715e336c805f3330986eb9a2ad3c802..c640f98d20c54840cca2d65e43d4e0fed37d52ac 100644 (file)
@@ -496,6 +496,9 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
                timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
        finish_wait(&dev->waitq, &wait);
 
+       /* make sure URB is idle after timeout or (spurious) CMD_ACK */
+       usb_kill_urb(dev->cntl_urb);
+
        mutex_unlock(&dev->io_mutex);
 
        if (retval < 0) {
index f1201d4de29702b85fc22fbaa3760f21b96cd010..e8f06b41a50397af28e537606b65a2cce6e37c99 100644 (file)
@@ -532,23 +532,29 @@ static int iuu_uart_flush(struct usb_serial_port *port)
        struct device *dev = &port->dev;
        int i;
        int status;
-       u8 rxcmd = IUU_UART_RX;
+       u8 *rxcmd;
        struct iuu_private *priv = usb_get_serial_port_data(port);
 
        if (iuu_led(port, 0xF000, 0, 0, 0xFF) < 0)
                return -EIO;
 
+       rxcmd = kmalloc(1, GFP_KERNEL);
+       if (!rxcmd)
+               return -ENOMEM;
+
+       rxcmd[0] = IUU_UART_RX;
+
        for (i = 0; i < 2; i++) {
-               status = bulk_immediate(port, &rxcmd, 1);
+               status = bulk_immediate(port, rxcmd, 1);
                if (status != IUU_OPERATION_OK) {
                        dev_dbg(dev, "%s - uart_flush_write error\n", __func__);
-                       return status;
+                       goto out_free;
                }
 
                status = read_immediate(port, &priv->len, 1);
                if (status != IUU_OPERATION_OK) {
                        dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
-                       return status;
+                       goto out_free;
                }
 
                if (priv->len > 0) {
@@ -556,12 +562,16 @@ static int iuu_uart_flush(struct usb_serial_port *port)
                        status = read_immediate(port, priv->buf, priv->len);
                        if (status != IUU_OPERATION_OK) {
                                dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
-                               return status;
+                               goto out_free;
                        }
                }
        }
        dev_dbg(dev, "%s - uart_flush_read OK!\n", __func__);
        iuu_led(port, 0, 0xF000, 0, 0xFF);
+
+out_free:
+       kfree(rxcmd);
+
        return status;
 }
 
index 2c21e34235bbb797baf585b94bbf8284a06e9142..3fe959104311b4019497b288194647afe1aba653 100644 (file)
@@ -1117,6 +1117,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
@@ -2057,6 +2059,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff),                     /* Fibocom NL678 series */
          .driver_info = RSVD(6) },
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },                   /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+       { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) },                   /* LongSung M5710 */
        { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },                   /* GosunCn GM500 RNDIS */
        { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) },                   /* GosunCn GM500 MBIM */
        { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) },                   /* GosunCn GM500 ECM/NCM */
index 870e9cf3d5dc4fc1b106df650a48bfcea230ad89..f9677a5ec31b2869fd21743c9a74c00968842d6a 100644 (file)
@@ -90,6 +90,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_BROKEN_FUA),
 
+/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
+UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
+               "PNY",
+               "Pro Elite SSD",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_ATA_1X),
+
 /* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
 UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
                "PNY",
index 187690fd1a5bd35b53e7db3969f64e5e38aabf25..60d375e9c3c7cb12a8a1b74541f5ae7de65c5d3a 100644 (file)
@@ -20,6 +20,6 @@ config TYPEC_NVIDIA_ALTMODE
          to enable support for VirtualLink devices with NVIDIA GPUs.
 
          To compile this driver as a module, choose M here: the
-         module will be called typec_displayport.
+         module will be called typec_nvidia.
 
 endmenu
index ebfd3113a9a8028c4e17bc9d033c66a3b98e4116..8f77669f9cf4fad37ff036c922eb6d7a524b7ddf 100644 (file)
@@ -766,6 +766,7 @@ int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmod
                return ret;
 
        sysfs_notify(&partner->dev.kobj, NULL, "number_of_alternate_modes");
+       kobject_uevent(&partner->dev.kobj, KOBJ_CHANGE);
 
        return 0;
 }
@@ -923,6 +924,7 @@ int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes)
                return ret;
 
        sysfs_notify(&plug->dev.kobj, NULL, "number_of_alternate_modes");
+       kobject_uevent(&plug->dev.kobj, KOBJ_CHANGE);
 
        return 0;
 }
index cf37a59ce13044dc50c8d200914548f52d122af1..46a25b8db72e56424f6f163ab9763b80845dfa95 100644 (file)
@@ -207,10 +207,21 @@ static int
 pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_displayport_data *dp)
 {
        u8 msg[2] = { };
+       int ret;
 
        msg[0] = PMC_USB_DP_HPD;
        msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
 
+       /* Configure HPD level first if HPD and IRQ arrive together */
+       if (!IOM_PORT_HPD_ASSERTED(port->iom_status) &&
+           dp->status & DP_STATUS_IRQ_HPD &&
+           dp->status & DP_STATUS_HPD_STATE) {
+               msg[1] = PMC_USB_DP_HPD_LVL;
+               ret = pmc_usb_command(port, msg, sizeof(msg));
+               if (ret)
+                       return ret;
+       }
+
        if (dp->status & DP_STATUS_IRQ_HPD)
                msg[1] = PMC_USB_DP_HPD_IRQ;
 
index 66cde5e5f7964a597bc802b8e7718c5677d42bde..3209b5ddd30c97307e5428c72b187cb4f00fa049 100644 (file)
@@ -396,6 +396,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                default:
                        usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n",
                                          wValue);
+                       if (wValue >= 32)
+                               goto error;
                        vhci_hcd->port_status[rhport] &= ~(1 << wValue);
                        break;
                }
index 531a00d703cdf89d76b581e969092c59f1098fa5..c8784dfafdd73391754f113dd49fd6ba2fa65f2d 100644 (file)
@@ -863,6 +863,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
        size_t len, total_len = 0;
        int err;
        struct vhost_net_ubuf_ref *ubufs;
+       struct ubuf_info *ubuf;
        bool zcopy_used;
        int sent_pkts = 0;
 
@@ -895,9 +896,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
 
                /* use msg_control to pass vhost zerocopy ubuf info to skb */
                if (zcopy_used) {
-                       struct ubuf_info *ubuf;
                        ubuf = nvq->ubuf_info + nvq->upend_idx;
-
                        vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
                        vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
                        ubuf->callback = vhost_zerocopy_callback;
@@ -927,7 +926,8 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
                err = sock->ops->sendmsg(sock, &msg, len);
                if (unlikely(err < 0)) {
                        if (zcopy_used) {
-                               vhost_net_ubuf_put(ubufs);
+                               if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
+                                       vhost_net_ubuf_put(ubufs);
                                nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
                                        % UIO_MAXIOV;
                        }
index a483cec31d5cbd2f9d69f5caa869b28a98d3fd6c..5e78fb719602dc3888b93ec0f762b453caa4387e 100644 (file)
 #define VHOST_VSOCK_PKT_WEIGHT 256
 
 enum {
-       VHOST_VSOCK_FEATURES = VHOST_FEATURES,
+       VHOST_VSOCK_FEATURES = VHOST_FEATURES |
+                              (1ULL << VIRTIO_F_ACCESS_PLATFORM)
+};
+
+enum {
+       VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
 };
 
 /* Used to track all the vhost_vsock instances on the system. */
@@ -94,6 +99,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
        if (!vhost_vq_get_backend(vq))
                goto out;
 
+       if (!vq_meta_prefetch(vq))
+               goto out;
+
        /* Avoid further vmexits, we're already processing the virtqueue */
        vhost_disable_notify(&vsock->dev, vq);
 
@@ -449,6 +457,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
        if (!vhost_vq_get_backend(vq))
                goto out;
 
+       if (!vq_meta_prefetch(vq))
+               goto out;
+
        vhost_disable_notify(&vsock->dev, vq);
        do {
                u32 len;
@@ -766,8 +777,12 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
        mutex_lock(&vsock->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&vsock->dev)) {
-               mutex_unlock(&vsock->dev.mutex);
-               return -EFAULT;
+               goto err;
+       }
+
+       if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
+               if (vhost_init_device_iotlb(&vsock->dev, true))
+                       goto err;
        }
 
        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
@@ -778,6 +793,10 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
        }
        mutex_unlock(&vsock->dev.mutex);
        return 0;
+
+err:
+       mutex_unlock(&vsock->dev.mutex);
+       return -EFAULT;
 }
 
 static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
@@ -811,6 +830,18 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
                if (copy_from_user(&features, argp, sizeof(features)))
                        return -EFAULT;
                return vhost_vsock_set_features(vsock, features);
+       case VHOST_GET_BACKEND_FEATURES:
+               features = VHOST_VSOCK_BACKEND_FEATURES;
+               if (copy_to_user(argp, &features, sizeof(features)))
+                       return -EFAULT;
+               return 0;
+       case VHOST_SET_BACKEND_FEATURES:
+               if (copy_from_user(&features, argp, sizeof(features)))
+                       return -EFAULT;
+               if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
+                       return -EOPNOTSUPP;
+               vhost_set_backend_features(&vsock->dev, features);
+               return 0;
        default:
                mutex_lock(&vsock->dev.mutex);
                r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
@@ -823,6 +854,34 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
        }
 }
 
+static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+       struct file *file = iocb->ki_filp;
+       struct vhost_vsock *vsock = file->private_data;
+       struct vhost_dev *dev = &vsock->dev;
+       int noblock = file->f_flags & O_NONBLOCK;
+
+       return vhost_chr_read_iter(dev, to, noblock);
+}
+
+static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
+                                       struct iov_iter *from)
+{
+       struct file *file = iocb->ki_filp;
+       struct vhost_vsock *vsock = file->private_data;
+       struct vhost_dev *dev = &vsock->dev;
+
+       return vhost_chr_write_iter(dev, from);
+}
+
+static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
+{
+       struct vhost_vsock *vsock = file->private_data;
+       struct vhost_dev *dev = &vsock->dev;
+
+       return vhost_chr_poll(file, dev, wait);
+}
+
 static const struct file_operations vhost_vsock_fops = {
        .owner          = THIS_MODULE,
        .open           = vhost_vsock_dev_open,
@@ -830,6 +889,9 @@ static const struct file_operations vhost_vsock_fops = {
        .llseek         = noop_llseek,
        .unlocked_ioctl = vhost_vsock_dev_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
+       .read_iter      = vhost_vsock_chr_read_iter,
+       .write_iter     = vhost_vsock_chr_write_iter,
+       .poll           = vhost_vsock_chr_poll,
 };
 
 static struct miscdevice vhost_vsock_misc = {
index a8030332a19169b4805c8537b2a0df359a491833..e850f79351cbb4690fe757293935daeab179c5bf 100644 (file)
@@ -2060,16 +2060,6 @@ static struct irq_chip xen_percpu_chip __read_mostly = {
        .irq_ack                = ack_dynirq,
 };
 
-int xen_set_callback_via(uint64_t via)
-{
-       struct xen_hvm_param a;
-       a.domid = DOMID_SELF;
-       a.index = HVM_PARAM_CALLBACK_IRQ;
-       a.value = via;
-       return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
-}
-EXPORT_SYMBOL_GPL(xen_set_callback_via);
-
 #ifdef CONFIG_XEN_PVHVM
 /* Vector callbacks are better than PCI interrupts to receive event
  * channel notifications because we can receive vector callbacks on any
index dd911e1ff782cc4d54556d1d034fa9ed15c0b7bd..18f0ed8b1f93b8fab25824162f86bac9beb2d781 100644 (file)
@@ -132,6 +132,13 @@ static int platform_pci_probe(struct pci_dev *pdev,
                        dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
                        goto out;
                }
+               /*
+                * It doesn't strictly *have* to run on CPU0 but it sure
+                * as hell better process the event channel ports delivered
+                * to CPU0.
+                */
+               irq_set_affinity(pdev->irq, cpumask_of(0));
+
                callback_via = get_callback_via(pdev);
                ret = xen_set_callback_via(callback_via);
                if (ret) {
@@ -149,7 +156,6 @@ static int platform_pci_probe(struct pci_dev *pdev,
        ret = gnttab_init();
        if (ret)
                goto grant_out;
-       xenbus_probe(NULL);
        return 0;
 grant_out:
        gnttab_free_auto_xlat_frames();
index b0c73c58f9874a34f81fcd4477cc752cdfdbf381..720a7b7abd46d690f2ed86f544064403ab4f1647 100644 (file)
@@ -717,14 +717,15 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
        return 0;
 }
 
-static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
+static long privcmd_ioctl_mmap_resource(struct file *file,
+                               struct privcmd_mmap_resource __user *udata)
 {
        struct privcmd_data *data = file->private_data;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct privcmd_mmap_resource kdata;
        xen_pfn_t *pfns = NULL;
-       struct xen_mem_acquire_resource xdata;
+       struct xen_mem_acquire_resource xdata = { };
        int rc;
 
        if (copy_from_user(&kdata, udata, sizeof(kdata)))
@@ -734,6 +735,22 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
        if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
                return -EPERM;
 
+       /* Both fields must be set or unset */
+       if (!!kdata.addr != !!kdata.num)
+               return -EINVAL;
+
+       xdata.domid = kdata.dom;
+       xdata.type = kdata.type;
+       xdata.id = kdata.id;
+
+       if (!kdata.addr && !kdata.num) {
+               /* Query the size of the resource. */
+               rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
+               if (rc)
+                       return rc;
+               return __put_user(xdata.nr_frames, &udata->num);
+       }
+
        mmap_write_lock(mm);
 
        vma = find_vma(mm, kdata.addr);
@@ -768,10 +785,6 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
        } else
                vma->vm_private_data = PRIV_VMA_LOCKED;
 
-       memset(&xdata, 0, sizeof(xdata));
-       xdata.domid = kdata.dom;
-       xdata.type = kdata.type;
-       xdata.id = kdata.id;
        xdata.frame = kdata.idx;
        xdata.nr_frames = kdata.num;
        set_xen_guest_handle(xdata.frame_list, pfns);
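With the both-or-neither rule on addr and num, userspace can size a resource before mapping it: call the ioctl once with both fields zero to have num filled in with the frame count, then mmap that many pages and call it again. A hedged userspace sketch with error handling trimmed; domid and id are assumed inputs, and the resource type is illustrative (from Xen's public headers):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/privcmd.h>

void map_resource(int domid, unsigned int id)
{
	int fd = open("/dev/xen/privcmd", O_RDWR);
	struct privcmd_mmap_resource res = {
		.dom  = domid,
		.type = XENMEM_resource_grant_table,	/* illustrative resource type */
		.id   = id,
		/* .addr = 0, .num = 0: the new query-size mode */
	};

	ioctl(fd, IOCTL_PRIVCMD_MMAP_RESOURCE, &res);	/* res.num <- nr_frames */

	void *va = mmap(NULL, res.num << 12, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, 0);		/* 4 KiB Xen frames assumed */
	res.addr = (unsigned long)va;
	ioctl(fd, IOCTL_PRIVCMD_MMAP_RESOURCE, &res);	/* now actually map */
}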
index 2a93b7c9c159929557f845c549b7f7676efd3a11..dc153733541441ed624066829d88b407effe429a 100644 (file)
@@ -115,6 +115,7 @@ int xenbus_probe_node(struct xen_bus_type *bus,
                      const char *type,
                      const char *nodename);
 int xenbus_probe_devices(struct xen_bus_type *bus);
+void xenbus_probe(void);
 
 void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
 
index eb5151fc8efab02899ce764b99a8d6f2955e9ca7..e5fda0256feb3d476aeee5f2d1e28ac7bd29f226 100644 (file)
@@ -57,16 +57,8 @@ DEFINE_MUTEX(xs_response_mutex);
 static int xenbus_irq;
 static struct task_struct *xenbus_task;
 
-static DECLARE_WORK(probe_work, xenbus_probe);
-
-
 static irqreturn_t wake_waiting(int irq, void *unused)
 {
-       if (unlikely(xenstored_ready == 0)) {
-               xenstored_ready = 1;
-               schedule_work(&probe_work);
-       }
-
        wake_up(&xb_waitq);
        return IRQ_HANDLED;
 }
index 44634d970a5ca1f3bb4b0a1278e245121cd69135..c8f0282bb64975b34f44459c0565156293b0ea64 100644 (file)
@@ -683,29 +683,76 @@ void unregister_xenstore_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
 
-void xenbus_probe(struct work_struct *unused)
+void xenbus_probe(void)
 {
        xenstored_ready = 1;
 
+       /*
+        * In the HVM case, xenbus_init() deferred its call to
+        * xs_init() in case callbacks were not operational yet.
+        * So do it now.
+        */
+       if (xen_store_domain_type == XS_HVM)
+               xs_init();
+
        /* Notify others that xenstore is up */
        blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
 }
-EXPORT_SYMBOL_GPL(xenbus_probe);
 
-static int __init xenbus_probe_initcall(void)
+/*
+ * Returns true when XenStore init must be deferred in order to
+ * allow the PCI platform device to be initialised, before we
+ * can actually have event channel interrupts working.
+ */
+static bool xs_hvm_defer_init_for_callback(void)
 {
-       if (!xen_domain())
-               return -ENODEV;
+#ifdef CONFIG_XEN_PVHVM
+       return xen_store_domain_type == XS_HVM &&
+               !xen_have_vector_callback;
+#else
+       return false;
+#endif
+}
 
-       if (xen_initial_domain() || xen_hvm_domain())
-               return 0;
+static int __init xenbus_probe_initcall(void)
+{
+       /*
+        * Probe XenBus here in the XS_PV case, and also XS_HVM unless we
+        * need to wait for the platform PCI device to come up.
+        */
+       if (xen_store_domain_type == XS_PV ||
+           (xen_store_domain_type == XS_HVM &&
+            !xs_hvm_defer_init_for_callback()))
+               xenbus_probe();
 
-       xenbus_probe(NULL);
        return 0;
 }
-
 device_initcall(xenbus_probe_initcall);
 
+int xen_set_callback_via(uint64_t via)
+{
+       struct xen_hvm_param a;
+       int ret;
+
+       a.domid = DOMID_SELF;
+       a.index = HVM_PARAM_CALLBACK_IRQ;
+       a.value = via;
+
+       ret = HYPERVISOR_hvm_op(HVMOP_set_param, &a);
+       if (ret)
+               return ret;
+
+       /*
+        * If xenbus_probe_initcall() deferred the xenbus_probe()
+        * due to the callback not functioning yet, we can do it now.
+        */
+       if (!xenstored_ready && xs_hvm_defer_init_for_callback())
+               xenbus_probe();
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(xen_set_callback_via);
+
 /* Set up event channel for xenstored which is run as a local process
  * (this is normally used only in dom0)
  */
@@ -818,11 +865,17 @@ static int __init xenbus_init(void)
                break;
        }
 
-       /* Initialize the interface to xenstore. */
-       err = xs_init();
-       if (err) {
-               pr_warn("Error initializing xenstore comms: %i\n", err);
-               goto out_error;
+       /*
+        * HVM domains may not have a functional callback yet. In that
+        * case let xs_init() be called from xenbus_probe(), which will
+        * get invoked at an appropriate time.
+        */
+       if (xen_store_domain_type != XS_HVM) {
+               err = xs_init();
+               if (err) {
+                       pr_warn("Error initializing xenstore comms: %i\n", err);
+                       goto out_error;
+               }
        }
 
        if ((xen_store_domain_type != XS_LOCAL) &&
index 9068d5578a26f9a6c1fe51a37599af0ff9c08e66..7bd659ad959ec0eb61046e4eaea83b220ca48aaf 100644 (file)
@@ -350,7 +350,7 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
                                 unsigned blkoff)
 {
        union afs_xdr_dirent *dire;
-       unsigned offset, next, curr;
+       unsigned offset, next, curr, nr_slots;
        size_t nlen;
        int tmp;
 
@@ -363,13 +363,12 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
             offset < AFS_DIR_SLOTS_PER_BLOCK;
             offset = next
             ) {
-               next = offset + 1;
-
                /* skip entries marked unused in the bitmap */
                if (!(block->hdr.bitmap[offset / 8] &
                      (1 << (offset % 8)))) {
                        _debug("ENT[%zu.%u]: unused",
                               blkoff / sizeof(union afs_xdr_dir_block), offset);
+                       next = offset + 1;
                        if (offset >= curr)
                                ctx->pos = blkoff +
                                        next * sizeof(union afs_xdr_dirent);
@@ -381,35 +380,39 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
                nlen = strnlen(dire->u.name,
                               sizeof(*block) -
                               offset * sizeof(union afs_xdr_dirent));
+               if (nlen > AFSNAMEMAX - 1) {
+                       _debug("ENT[%zu]: name too long (len %u/%zu)",
+                              blkoff / sizeof(union afs_xdr_dir_block),
+                              offset, nlen);
+                       return afs_bad(dvnode, afs_file_error_dir_name_too_long);
+               }
 
                _debug("ENT[%zu.%u]: %s %zu \"%s\"",
                       blkoff / sizeof(union afs_xdr_dir_block), offset,
                       (offset < curr ? "skip" : "fill"),
                       nlen, dire->u.name);
 
-               /* work out where the next possible entry is */
-               for (tmp = nlen; tmp > 15; tmp -= sizeof(union afs_xdr_dirent)) {
-                       if (next >= AFS_DIR_SLOTS_PER_BLOCK) {
-                               _debug("ENT[%zu.%u]:"
-                                      " %u travelled beyond end dir block"
-                                      " (len %u/%zu)",
-                                      blkoff / sizeof(union afs_xdr_dir_block),
-                                      offset, next, tmp, nlen);
-                               return afs_bad(dvnode, afs_file_error_dir_over_end);
-                       }
-                       if (!(block->hdr.bitmap[next / 8] &
-                             (1 << (next % 8)))) {
-                               _debug("ENT[%zu.%u]:"
-                                      " %u unmarked extension (len %u/%zu)",
+               nr_slots = afs_dir_calc_slots(nlen);
+               next = offset + nr_slots;
+               if (next > AFS_DIR_SLOTS_PER_BLOCK) {
+                       _debug("ENT[%zu.%u]:"
+                              " %u extends beyond end dir block"
+                              " (len %zu)",
+                              blkoff / sizeof(union afs_xdr_dir_block),
+                              offset, next, nlen);
+                       return afs_bad(dvnode, afs_file_error_dir_over_end);
+               }
+
+               /* Check that the name-extension dirents are all allocated */
+               for (tmp = 1; tmp < nr_slots; tmp++) {
+                       unsigned int ix = offset + tmp;
+                       if (!(block->hdr.bitmap[ix / 8] & (1 << (ix % 8)))) {
+                               _debug("ENT[%zu.%u]:"
+                                      " unmarked extension (%u/%u)",
                                       blkoff / sizeof(union afs_xdr_dir_block),
-                                      offset, next, tmp, nlen);
+                                      offset, tmp, nr_slots);
                                return afs_bad(dvnode, afs_file_error_dir_unmarked_ext);
                        }
-
-                       _debug("ENT[%zu.%u]: ext %u/%zu",
-                              blkoff / sizeof(union afs_xdr_dir_block),
-                              next, tmp, nlen);
-                       next++;
                }
 
                /* skip if starts before the current position */
index 2ffe09abae7fcca86bf66471eb80a91e580bcb2e..f4600c1353adf79fa002c8864ae574bee9c694ec 100644 (file)
@@ -215,8 +215,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
        }
 
        /* Work out how many slots we're going to need. */
-       need_slots = round_up(12 + name->len + 1 + 4, AFS_DIR_DIRENT_SIZE);
-       need_slots /= AFS_DIR_DIRENT_SIZE;
+       need_slots = afs_dir_calc_slots(name->len);
 
        meta_page = kmap(page0);
        meta = &meta_page->blocks[0];
@@ -393,8 +392,7 @@ void afs_edit_dir_remove(struct afs_vnode *vnode,
        }
 
        /* Work out how many slots we're going to discard. */
-       need_slots = round_up(12 + name->len + 1 + 4, AFS_DIR_DIRENT_SIZE);
-       need_slots /= AFS_DIR_DIRENT_SIZE;
+       need_slots = afs_dir_calc_slots(name->len);
 
        meta_page = kmap(page0);
        meta = &meta_page->blocks[0];
index 94f1f398eefadc0b899d67e23bece7b30da4abeb..8ca8681645077d0bd86ff748462328a8718c1735 100644 (file)
@@ -54,10 +54,16 @@ union afs_xdr_dirent {
                __be16          hash_next;
                __be32          vnode;
                __be32          unique;
-               u8              name[16];
-               u8              overflow[4];    /* if any char of the name (inc
-                                                * NUL) reaches here, consume
-                                                * the next dirent too */
+               u8              name[];
+               /* When determining the number of dirent slots needed to
+                * represent a directory entry, name should be assumed to be 16
+                * bytes, due to a now-standardised (mis)calculation, but it is
+                * in fact 20 bytes in size.  afs_dir_calc_slots() should be
+                * used for this.
+                *
+                * For names longer than (16 or) 20 bytes, extra slots should
+                * be annexed to this one using the extended_name format.
+                */
        } u;
        u8                      extended_name[32];
 } __packed;
@@ -96,4 +102,15 @@ struct afs_xdr_dir_page {
        union afs_xdr_dir_block blocks[AFS_DIR_BLOCKS_PER_PAGE];
 };
 
+/*
+ * Calculate the number of dirent slots required for any given name length.
+ * The calculation is made assuming the part of the name in the first slot is
+ * 16 bytes, rather than 20, but this miscalculation is now standardised.
+ */
+static inline unsigned int afs_dir_calc_slots(size_t name_len)
+{
+       name_len++; /* NUL-terminated */
+       return 1 + ((name_len + 15) / AFS_DIR_DIRENT_SIZE);
+}
+
 #endif /* XDR_FS_H */
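
To make the slot arithmetic above concrete, here is a minimal standalone
sketch of the same calculation (assuming AFS_DIR_DIRENT_SIZE is 32, the
dirent slot size used by this header; the helper is reproduced from the
hunk, the driver around it is illustrative only):

    /* Worked example of afs_dir_calc_slots(): the first slot is treated
     * as holding 16 bytes of name (the standardised miscalculation),
     * and every further 32 bytes of name costs one extension slot. */
    #include <stddef.h>
    #include <stdio.h>

    #define AFS_DIR_DIRENT_SIZE 32

    static unsigned int afs_dir_calc_slots(size_t name_len)
    {
            name_len++; /* NUL-terminated */
            return 1 + ((name_len + 15) / AFS_DIR_DIRENT_SIZE);
    }

    int main(void)
    {
            printf("%u\n", afs_dir_calc_slots(15)); /* 15 + NUL = 16 -> 1 slot  */
            printf("%u\n", afs_dir_calc_slots(16)); /* 16 + NUL = 17 -> 2 slots */
            printf("%u\n", afs_dir_calc_slots(47)); /* 47 + NUL = 48 -> 2 slots */
            printf("%u\n", afs_dir_calc_slots(48)); /* 48 + NUL = 49 -> 3 slots */
            return 0;
    }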
index 9293045e128cdcc7eb1d467cfaace1e5c5850441..3b8963e228a1ba82c0ac117d3a00892fc6b83abf 100644 (file)
@@ -605,6 +605,8 @@ int thaw_bdev(struct block_device *bdev)
                error = thaw_super(sb);
        if (error)
                bdev->bd_fsfreeze_count++;
+       else
+               bdev->bd_fsfreeze_sb = NULL;
 out:
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return error;
@@ -774,8 +776,11 @@ static struct kmem_cache * bdev_cachep __read_mostly;
 static struct inode *bdev_alloc_inode(struct super_block *sb)
 {
        struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
+
        if (!ei)
                return NULL;
+       memset(&ei->bdev, 0, sizeof(ei->bdev));
+       ei->bdev.bd_bdi = &noop_backing_dev_info;
        return &ei->vfs_inode;
 }
 
@@ -869,14 +874,12 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
        mapping_set_gfp_mask(&inode->i_data, GFP_USER);
 
        bdev = I_BDEV(inode);
-       memset(bdev, 0, sizeof(*bdev));
        mutex_init(&bdev->bd_mutex);
        mutex_init(&bdev->bd_fsfreeze_mutex);
        spin_lock_init(&bdev->bd_size_lock);
        bdev->bd_disk = disk;
        bdev->bd_partno = partno;
        bdev->bd_inode = inode;
-       bdev->bd_bdi = &noop_backing_dev_info;
 #ifdef CONFIG_SYSFS
        INIT_LIST_HEAD(&bdev->bd_holder_disks);
 #endif
@@ -1055,7 +1058,6 @@ static void bd_finish_claiming(struct block_device *bdev, void *holder)
 /**
  * bd_abort_claiming - abort claiming of a block device
  * @bdev: block device of interest
- * @whole: whole block device
  * @holder: holder that has claimed @bdev
  *
  * Abort claiming of a block device when the exclusive open failed. This can be
@@ -1828,6 +1830,7 @@ const struct file_operations def_blk_fops = {
 /**
  * lookup_bdev  - lookup a struct block_device by name
  * @pathname:  special file representing the block device
+ * @dev:       used to return the block device's dev_t
  *
  * Get a reference to the blockdevice at @pathname in the current
  * namespace if possible and return it.  Return ERR_PTR(error)
index 02d7d7b2563b5cb9f0fdc7e8fc9e910731016fe3..9cadacf3ec2754def54db3505c4a5cc565329be2 100644 (file)
@@ -3117,7 +3117,7 @@ void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
                list_del_init(&lower->list);
                if (lower == node)
                        node = NULL;
-               btrfs_backref_free_node(cache, lower);
+               btrfs_backref_drop_node(cache, lower);
        }
 
        btrfs_backref_cleanup_node(cache, node);
index 52f2198d44c95c513d942af7a50f6807820cd931..0886e81e554020daf145ba6a0b0044e6aeb5fc54 100644 (file)
@@ -2669,7 +2669,8 @@ again:
         * Go through delayed refs for all the stuff we've just kicked off
         * and then loop back (just once)
         */
-       ret = btrfs_run_delayed_refs(trans, 0);
+       if (!ret)
+               ret = btrfs_run_delayed_refs(trans, 0);
        if (!ret && loops == 0) {
                loops++;
                spin_lock(&cur_trans->dirty_bgs_lock);
index 555cbcef6585739b73bd2d5e9c495abddc462cca..d9bf53d9ff907cfe47603dc0bcb81d82f9afde99 100644 (file)
@@ -42,6 +42,15 @@ enum {
         * to an inode.
         */
        BTRFS_INODE_NO_XATTRS,
+       /*
+        * Set when we are in a context where we need to start a transaction and
+        * have dirty pages with the respective file range locked. This is to
+        * ensure that when reserving space for the transaction, if we are low
+        * on available space and need to flush delalloc, we will not flush
+        * delalloc for this inode, because that could result in a deadlock (on
+        * the file range, inode's io_tree).
+        */
+       BTRFS_INODE_NO_DELALLOC_FLUSH,
 };
 
 /* in memory btrfs inode */
index 07810891e20458ab308455dd960fc4373d18b3aa..cc89b63d65a4dfbd0ad0d95f9ebe02df58c281ce 100644 (file)
@@ -2555,8 +2555,14 @@ out:
  * @p:         Holds all btree nodes along the search path
  * @root:      The root node of the tree
  * @key:       The key we are looking for
- * @ins_len:   Indicates purpose of search, for inserts it is 1, for
- *             deletions it's -1. 0 for plain searches
+ * @ins_len:   Indicates purpose of search:
+ *              >0  for inserts, it's the size of the item inserted (*)
+ *              <0  for deletions
+ *               0  for plain searches, not modifying the tree
+ *
+ *              (*) If size of item inserted doesn't include
+ *              sizeof(struct btrfs_item), then p->search_for_extension must
+ *              be set.
  * @cow:       boolean should CoW operations be performed. Must always be 1
  *             when modifying the tree.
  *
@@ -2717,6 +2723,20 @@ cow_done:
 
                if (level == 0) {
                        p->slots[level] = slot;
+                       /*
+                        * Item key already exists. In this case, if we are
+                        * allowed to insert the item (for example, in the
+                        * dir_item case, where key collisions are allowed), it
+                        * will be merged with the original item. Only the item
+                        * size grows; no new btrfs item is added. If
+                        * search_for_extension is not set, ins_len already
+                        * accounts for the size of a btrfs_item, so deduct it
+                        * here to keep the leaf space check correct.
+                        */
+                       if (ret == 0 && ins_len > 0 && !p->search_for_extension) {
+                               ASSERT(ins_len >= sizeof(struct btrfs_item));
+                               ins_len -= sizeof(struct btrfs_item);
+                       }
                        if (ins_len > 0 &&
                            btrfs_leaf_free_space(b) < ins_len) {
                                if (write_lock_level < 1) {
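
The new search_for_extension flag is meant to be set only around the
search itself, when ins_len describes extra data appended to an existing
item rather than a whole new item. A minimal caller sketch (the
trans/root/key/path variables and extra_data_size are illustrative, not
from the patch; the csum and inline-backref hunks below follow the same
shape):

    /* Extending an existing item: ins_len carries only the extra data
     * size, so tell btrfs_search_slot() not to subtract
     * sizeof(struct btrfs_item) when the key already exists. */
    path->search_for_extension = 1;
    ret = btrfs_search_slot(trans, root, &key, path, extra_data_size, 1);
    path->search_for_extension = 0;
    if (ret < 0)
            goto out;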
index 1d3c1e479f3dc04c2faa21b50300a23d08b2f43a..e6e37591f1ded91a97c6046b1b021ecdfcab1e7c 100644 (file)
@@ -131,6 +131,8 @@ enum {
         * defrag
         */
        BTRFS_FS_STATE_REMOUNTING,
+       /* Filesystem in RO mode */
+       BTRFS_FS_STATE_RO,
        /* Track if a transaction abort has been reported on this filesystem */
        BTRFS_FS_STATE_TRANS_ABORTED,
        /*
@@ -367,6 +369,12 @@ struct btrfs_path {
        unsigned int search_commit_root:1;
        unsigned int need_commit_sem:1;
        unsigned int skip_release_on_error:1;
+       /*
+        * Indicates that the new item (btrfs_search_slot) is extending an
+        * already existing item, so ins_len contains only the data size and not
+        * the item header (i.e. sizeof(struct btrfs_item) is not included).
+        */
+       unsigned int search_for_extension:1;
 };
 #define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
                                        sizeof(struct btrfs_item))
@@ -2885,10 +2893,26 @@ static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
  * If we remount the fs to be R/O or umount the fs, the cleaner needn't do
  * anything except sleeping. This function is used to check the status of
  * the fs.
+ * We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount,
+ * since setting and checking for SB_RDONLY in the superblock's flags is not
+ * atomic.
  */
 static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
 {
-       return fs_info->sb->s_flags & SB_RDONLY || btrfs_fs_closing(fs_info);
+       return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) ||
+               btrfs_fs_closing(fs_info);
+}
+
+static inline void btrfs_set_sb_rdonly(struct super_block *sb)
+{
+       sb->s_flags |= SB_RDONLY;
+       set_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state);
+}
+
+static inline void btrfs_clear_sb_rdonly(struct super_block *sb)
+{
+       sb->s_flags &= ~SB_RDONLY;
+       clear_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state);
 }
 
 /* tree mod log functions from ctree.c */
@@ -3073,7 +3097,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
                               u32 min_type);
 
 int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
-int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr);
+int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
+                              bool in_reclaim_context);
 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
                              unsigned int extra_bits,
                              struct extent_state **cached_state);
index a98e33f232d55373563390854077a3ca4601d1a0..324f646d6e5e273b944235abe67721a0e3c607fe 100644 (file)
@@ -715,7 +715,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
         * flush all outstanding I/O and inode extent mappings before the
         * copy operation is declared as being finished
         */
-       ret = btrfs_start_delalloc_roots(fs_info, U64_MAX);
+       ret = btrfs_start_delalloc_roots(fs_info, U64_MAX, false);
        if (ret) {
                mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
                return ret;
index 1db966bf85b244fcf96cb67153053d62b149ca4c..2b8383d4114490bcc162f174a95ff1d6b2ef79e1 100644 (file)
@@ -199,16 +199,15 @@ static struct btrfs_block_group *find_next_block_group(
 static struct btrfs_block_group *peek_discard_list(
                                        struct btrfs_discard_ctl *discard_ctl,
                                        enum btrfs_discard_state *discard_state,
-                                       int *discard_index)
+                                       int *discard_index, u64 now)
 {
        struct btrfs_block_group *block_group;
-       const u64 now = ktime_get_ns();
 
        spin_lock(&discard_ctl->lock);
 again:
        block_group = find_next_block_group(discard_ctl, now);
 
-       if (block_group && now > block_group->discard_eligible_time) {
+       if (block_group && now >= block_group->discard_eligible_time) {
                if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
                    block_group->used != 0) {
                        if (btrfs_is_block_group_data_only(block_group))
@@ -222,12 +221,11 @@ again:
                        block_group->discard_state = BTRFS_DISCARD_EXTENTS;
                }
                discard_ctl->block_group = block_group;
+       }
+       if (block_group) {
                *discard_state = block_group->discard_state;
                *discard_index = block_group->discard_index;
-       } else {
-               block_group = NULL;
        }
-
        spin_unlock(&discard_ctl->lock);
 
        return block_group;
@@ -330,28 +328,15 @@ void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl,
                btrfs_discard_schedule_work(discard_ctl, false);
 }
 
-/**
- * btrfs_discard_schedule_work - responsible for scheduling the discard work
- * @discard_ctl: discard control
- * @override: override the current timer
- *
- * Discards are issued by a delayed workqueue item.  @override is used to
- * update the current delay as the baseline delay interval is reevaluated on
- * transaction commit.  This is also maxed with any other rate limit.
- */
-void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
-                                bool override)
+static void __btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
+                                         u64 now, bool override)
 {
        struct btrfs_block_group *block_group;
-       const u64 now = ktime_get_ns();
-
-       spin_lock(&discard_ctl->lock);
 
        if (!btrfs_run_discard_work(discard_ctl))
-               goto out;
-
+               return;
        if (!override && delayed_work_pending(&discard_ctl->work))
-               goto out;
+               return;
 
        block_group = find_next_block_group(discard_ctl, now);
        if (block_group) {
@@ -393,7 +378,24 @@ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
                mod_delayed_work(discard_ctl->discard_workers,
                                 &discard_ctl->work, nsecs_to_jiffies(delay));
        }
-out:
+}
+
+/*
+ * btrfs_discard_schedule_work - responsible for scheduling the discard work
+ * @discard_ctl:  discard control
+ * @override:     override the current timer
+ *
+ * Discards are issued by a delayed workqueue item.  @override is used to
+ * update the current delay as the baseline delay interval is reevaluated on
+ * transaction commit.  This is also maxed with any other rate limit.
+ */
+void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
+                                bool override)
+{
+       const u64 now = ktime_get_ns();
+
+       spin_lock(&discard_ctl->lock);
+       __btrfs_discard_schedule_work(discard_ctl, now, override);
        spin_unlock(&discard_ctl->lock);
 }
 
@@ -438,13 +440,18 @@ static void btrfs_discard_workfn(struct work_struct *work)
        int discard_index = 0;
        u64 trimmed = 0;
        u64 minlen = 0;
+       u64 now = ktime_get_ns();
 
        discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);
 
        block_group = peek_discard_list(discard_ctl, &discard_state,
-                                       &discard_index);
+                                       &discard_index, now);
        if (!block_group || !btrfs_run_discard_work(discard_ctl))
                return;
+       if (now < block_group->discard_eligible_time) {
+               btrfs_discard_schedule_work(discard_ctl, false);
+               return;
+       }
 
        /* Perform discarding */
        minlen = discard_minlen[discard_index];
@@ -474,13 +481,6 @@ static void btrfs_discard_workfn(struct work_struct *work)
                discard_ctl->discard_extent_bytes += trimmed;
        }
 
-       /*
-        * Updated without locks as this is inside the workfn and nothing else
-        * is reading the values
-        */
-       discard_ctl->prev_discard = trimmed;
-       discard_ctl->prev_discard_time = ktime_get_ns();
-
        /* Determine next steps for a block_group */
        if (block_group->discard_cursor >= btrfs_block_group_end(block_group)) {
                if (discard_state == BTRFS_DISCARD_BITMAPS) {
@@ -496,11 +496,13 @@ static void btrfs_discard_workfn(struct work_struct *work)
                }
        }
 
+       now = ktime_get_ns();
        spin_lock(&discard_ctl->lock);
+       discard_ctl->prev_discard = trimmed;
+       discard_ctl->prev_discard_time = now;
        discard_ctl->block_group = NULL;
+       __btrfs_discard_schedule_work(discard_ctl, now, false);
        spin_unlock(&discard_ctl->lock);
-
-       btrfs_discard_schedule_work(discard_ctl, false);
 }
 
 /**
index 765deefda92b1770229001a2774f47d38341ba6b..6b35b7e8813697e59c3e97b27e4c952a8973edfd 100644 (file)
@@ -1457,7 +1457,7 @@ void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
                root = list_first_entry(&fs_info->allocated_roots,
                                        struct btrfs_root, leak_list);
                btrfs_err(fs_info, "leaked root %s refcount %d",
-                         btrfs_root_name(root->root_key.objectid, buf),
+                         btrfs_root_name(&root->root_key, buf),
                          refcount_read(&root->refs));
                while (refcount_read(&root->refs) > 1)
                        btrfs_put_root(root);
@@ -1729,7 +1729,7 @@ static int cleaner_kthread(void *arg)
                 */
                btrfs_delete_unused_bgs(fs_info);
 sleep:
-               clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
+               clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
                if (kthread_should_park())
                        kthread_parkme();
                if (kthread_should_stop())
@@ -2830,6 +2830,9 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
                return -ENOMEM;
        btrfs_init_delayed_root(fs_info->delayed_root);
 
+       if (sb_rdonly(sb))
+               set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
+
        return btrfs_alloc_stripe_hash_table(fs_info);
 }
 
@@ -2969,6 +2972,7 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
                }
        }
 
+       ret = btrfs_find_orphan_roots(fs_info);
 out:
        return ret;
 }
@@ -3383,10 +3387,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
                }
        }
 
-       ret = btrfs_find_orphan_roots(fs_info);
-       if (ret)
-               goto fail_qgroup;
-
        fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
        if (IS_ERR(fs_info->fs_root)) {
                err = PTR_ERR(fs_info->fs_root);
@@ -4181,6 +4181,9 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
        btrfs_stop_all_workers(fs_info);
 
+       /* We shouldn't have any transaction open at this point */
+       ASSERT(list_empty(&fs_info->trans_list));
+
        clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
        free_root_pointers(fs_info, true);
        btrfs_free_fs_roots(fs_info);
index 56ea380f5a178817ad6627c23e3f612b4c502eac..30b1a630dc2f88b13d1612d5ec8f1dc4cf3f4ad7 100644 (file)
@@ -844,6 +844,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
        want = extent_ref_type(parent, owner);
        if (insert) {
                extra_size = btrfs_extent_inline_ref_size(want);
+               path->search_for_extension = 1;
                path->keep_locks = 1;
        } else
                extra_size = -1;
@@ -996,6 +997,7 @@ again:
 out:
        if (insert) {
                path->keep_locks = 0;
+               path->search_for_extension = 0;
                btrfs_unlock_up_safe(path, 1);
        }
        return err;
@@ -5547,7 +5549,15 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
                                goto out_free;
                        }
 
-                       trans = btrfs_start_transaction(tree_root, 0);
+                       /*
+                       * Use join to avoid potential EINTR from transaction
+                       * start. See wait_reserve_ticket and the whole
+                       * reservation callchain.
+                       */
+                       if (for_reloc)
+                               trans = btrfs_join_transaction(tree_root);
+                       else
+                               trans = btrfs_start_transaction(tree_root, 0);
                        if (IS_ERR(trans)) {
                                err = PTR_ERR(trans);
                                goto out_free;
index 6e3b72e63e4226d50014f5058da3b3cabd2c31a8..c9cee458e001bf8bfe5bd00d2aa8e5c68d51bcdd 100644 (file)
@@ -676,9 +676,7 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
 
 static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
 {
-       struct inode *inode = tree->private_data;
-
-       btrfs_panic(btrfs_sb(inode->i_sb), err,
+       btrfs_panic(tree->fs_info, err,
        "locking error: extent tree was modified by another thread while locked");
 }
 
index 1545c22ef2804ce1f3690febd0565ecf2339d8d1..6ccfc019ad909eb06636d7007b31d1276902b665 100644 (file)
@@ -1016,8 +1016,10 @@ again:
        }
 
        btrfs_release_path(path);
+       path->search_for_extension = 1;
        ret = btrfs_search_slot(trans, root, &file_key, path,
                                csum_size, 1);
+       path->search_for_extension = 0;
        if (ret < 0)
                goto out;
 
index 8e23780acfaeb4d0a4c5acfbdc6b35b69f76d858..a8e0a6b038d3efb2c53a98ae8370262cd4101369 100644 (file)
@@ -9390,7 +9390,9 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
  * some fairly slow code that needs optimization. This walks the list
  * of all the inodes with pending delalloc and forces them to disk.
  */
-static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot)
+static int start_delalloc_inodes(struct btrfs_root *root,
+                                struct writeback_control *wbc, bool snapshot,
+                                bool in_reclaim_context)
 {
        struct btrfs_inode *binode;
        struct inode *inode;
@@ -9398,6 +9400,7 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
        struct list_head works;
        struct list_head splice;
        int ret = 0;
+       bool full_flush = wbc->nr_to_write == LONG_MAX;
 
        INIT_LIST_HEAD(&works);
        INIT_LIST_HEAD(&splice);
@@ -9411,6 +9414,11 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
 
                list_move_tail(&binode->delalloc_inodes,
                               &root->delalloc_inodes);
+
+               if (in_reclaim_context &&
+                   test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
+                       continue;
+
                inode = igrab(&binode->vfs_inode);
                if (!inode) {
                        cond_resched_lock(&root->delalloc_lock);
@@ -9421,18 +9429,24 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
                if (snapshot)
                        set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
                                &binode->runtime_flags);
-               work = btrfs_alloc_delalloc_work(inode);
-               if (!work) {
-                       iput(inode);
-                       ret = -ENOMEM;
-                       goto out;
-               }
-               list_add_tail(&work->list, &works);
-               btrfs_queue_work(root->fs_info->flush_workers,
-                                &work->work);
-               if (*nr != U64_MAX) {
-                       (*nr)--;
-                       if (*nr == 0)
+               if (full_flush) {
+                       work = btrfs_alloc_delalloc_work(inode);
+                       if (!work) {
+                               iput(inode);
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+                       list_add_tail(&work->list, &works);
+                       btrfs_queue_work(root->fs_info->flush_workers,
+                                        &work->work);
+               } else {
+                       ret = sync_inode(inode, wbc);
+                       if (!ret &&
+                           test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+                                    &BTRFS_I(inode)->runtime_flags))
+                               ret = sync_inode(inode, wbc);
+                       btrfs_add_delayed_iput(inode);
+                       if (ret || wbc->nr_to_write <= 0)
                                goto out;
                }
                cond_resched();
@@ -9458,17 +9472,29 @@ out:
 
 int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
 {
+       struct writeback_control wbc = {
+               .nr_to_write = LONG_MAX,
+               .sync_mode = WB_SYNC_NONE,
+               .range_start = 0,
+               .range_end = LLONG_MAX,
+       };
        struct btrfs_fs_info *fs_info = root->fs_info;
-       u64 nr = U64_MAX;
 
        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
                return -EROFS;
 
-       return start_delalloc_inodes(root, &nr, true);
+       return start_delalloc_inodes(root, &wbc, true, false);
 }
 
-int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
+int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
+                              bool in_reclaim_context)
 {
+       struct writeback_control wbc = {
+               .nr_to_write = (nr == U64_MAX) ? LONG_MAX : (unsigned long)nr,
+               .sync_mode = WB_SYNC_NONE,
+               .range_start = 0,
+               .range_end = LLONG_MAX,
+       };
        struct btrfs_root *root;
        struct list_head splice;
        int ret;
@@ -9482,6 +9508,13 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
        spin_lock(&fs_info->delalloc_root_lock);
        list_splice_init(&fs_info->delalloc_roots, &splice);
        while (!list_empty(&splice) && nr) {
+               /*
+                * Reset nr_to_write here so we know that we're doing a full
+                * flush.
+                */
+               if (nr == U64_MAX)
+                       wbc.nr_to_write = LONG_MAX;
+
                root = list_first_entry(&splice, struct btrfs_root,
                                        delalloc_root);
                root = btrfs_grab_root(root);
@@ -9490,9 +9523,9 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
                               &fs_info->delalloc_roots);
                spin_unlock(&fs_info->delalloc_root_lock);
 
-               ret = start_delalloc_inodes(root, &nr, false);
+               ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
                btrfs_put_root(root);
-               if (ret < 0)
+               if (ret < 0 || wbc.nr_to_write <= 0)
                        goto out;
                spin_lock(&fs_info->delalloc_root_lock);
        }
index 703212ff50a56e348440b2302dd246d389388c5a..dde49a791f3e236e7ad7279589d4fb94da1917f2 100644 (file)
@@ -4951,7 +4951,7 @@ long btrfs_ioctl(struct file *file, unsigned int
        case BTRFS_IOC_SYNC: {
                int ret;
 
-               ret = btrfs_start_delalloc_roots(fs_info, U64_MAX);
+               ret = btrfs_start_delalloc_roots(fs_info, U64_MAX, false);
                if (ret)
                        return ret;
                ret = btrfs_sync_fs(inode->i_sb, 1);
index fe5e0026129d521041f78f08c8a5b83a54b7620a..aae1027bd76a1379e9397bef191b97ef3d978189 100644 (file)
@@ -26,22 +26,22 @@ static const struct root_name_map root_map[] = {
        { BTRFS_DATA_RELOC_TREE_OBJECTID,       "DATA_RELOC_TREE"       },
 };
 
-const char *btrfs_root_name(u64 objectid, char *buf)
+const char *btrfs_root_name(const struct btrfs_key *key, char *buf)
 {
        int i;
 
-       if (objectid == BTRFS_TREE_RELOC_OBJECTID) {
+       if (key->objectid == BTRFS_TREE_RELOC_OBJECTID) {
                snprintf(buf, BTRFS_ROOT_NAME_BUF_LEN,
-                        "TREE_RELOC offset=%llu", objectid);
+                        "TREE_RELOC offset=%llu", key->offset);
                return buf;
        }
 
        for (i = 0; i < ARRAY_SIZE(root_map); i++) {
-               if (root_map[i].id == objectid)
+               if (root_map[i].id == key->objectid)
                        return root_map[i].name;
        }
 
-       snprintf(buf, BTRFS_ROOT_NAME_BUF_LEN, "%llu", objectid);
+       snprintf(buf, BTRFS_ROOT_NAME_BUF_LEN, "%llu", key->objectid);
        return buf;
 }
 
index 78b99385a503fb9bc78fc4edf49fe38838837174..8c3e9319ec4efe455709f1ac5fb63a45302e4833 100644 (file)
@@ -11,6 +11,6 @@
 
 void btrfs_print_leaf(struct extent_buffer *l);
 void btrfs_print_tree(struct extent_buffer *c, bool follow);
-const char *btrfs_root_name(u64 objectid, char *buf);
+const char *btrfs_root_name(const struct btrfs_key *key, char *buf);
 
 #endif
index fe3046007f52abad65bcf437b1d22910574d6ff0..808370ada888992cff2e343eae11457cdd97dee9 100644 (file)
@@ -3190,6 +3190,12 @@ out:
        return ret;
 }
 
+static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
+{
+       return btrfs_fs_closing(fs_info) ||
+               test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
+}
+
 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 {
        struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
@@ -3198,6 +3204,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
        struct btrfs_trans_handle *trans = NULL;
        int err = -ENOMEM;
        int ret = 0;
+       bool stopped = false;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -3210,7 +3217,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
        path->skip_locking = 1;
 
        err = 0;
-       while (!err && !btrfs_fs_closing(fs_info)) {
+       while (!err && !(stopped = rescan_should_stop(fs_info))) {
                trans = btrfs_start_transaction(fs_info->fs_root, 0);
                if (IS_ERR(trans)) {
                        err = PTR_ERR(trans);
@@ -3253,7 +3260,7 @@ out:
        }
 
        mutex_lock(&fs_info->qgroup_rescan_lock);
-       if (!btrfs_fs_closing(fs_info))
+       if (!stopped)
                fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
        if (trans) {
                ret = update_qgroup_status_item(trans);
@@ -3272,7 +3279,7 @@ out:
 
        btrfs_end_transaction(trans);
 
-       if (btrfs_fs_closing(fs_info)) {
+       if (stopped) {
                btrfs_info(fs_info, "qgroup scan paused");
        } else if (err >= 0) {
                btrfs_info(fs_info, "qgroup scan completed%s",
@@ -3530,16 +3537,6 @@ static int try_flush_qgroup(struct btrfs_root *root)
        int ret;
        bool can_commit = true;
 
-       /*
-        * We don't want to run flush again and again, so if there is a running
-        * one, we won't try to start a new flush, but exit directly.
-        */
-       if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
-               wait_event(root->qgroup_flush_wait,
-                       !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
-               return 0;
-       }
-
        /*
         * If current process holds a transaction, we shouldn't flush, as we
         * assume all space reservation happens before a transaction handle is
@@ -3554,6 +3551,26 @@ static int try_flush_qgroup(struct btrfs_root *root)
            current->journal_info != BTRFS_SEND_TRANS_STUB)
                can_commit = false;
 
+       /*
+        * We don't want to run flush again and again, so if there is a running
+        * one, we won't try to start a new flush, but exit directly.
+        */
+       if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
+               /*
+                * We are already holding a transaction, thus we can block other
+                * threads from flushing.  So exit right now. This increases
+                * the chance of EDQUOT for heavy load and near-limit cases.
+                * But we can argue that if we're already near the limit, EDQUOT
+                * is unavoidable anyway.
+                */
+               if (!can_commit)
+                       return 0;
+
+               wait_event(root->qgroup_flush_wait,
+                       !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
+               return 0;
+       }
+
        ret = btrfs_start_delalloc_snapshot(root);
        if (ret < 0)
                goto out;
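
The wait shown here pairs with a release side at the end of
try_flush_qgroup() that is not visible in this hunk; in outline it drops
the bit and wakes the waiters:

    /* Release side (outline, not part of this hunk): allow the next
     * flusher in and unblock anyone sleeping in wait_event() above. */
    clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
    wake_up(&root->qgroup_flush_wait);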
index ab80896315beff90cf4f5016d23e91b295b63df5..b03e7891394e36c838f83a7219beb065177f5ae0 100644 (file)
@@ -89,6 +89,19 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
        if (ret)
                goto out_unlock;
 
+       /*
+        * After dirtying the page our caller will need to start a transaction,
+        * and if we are low on metadata free space, that can cause flushing of
+        * delalloc for all inodes in order to get metadata space released.
+        * However we are holding the range locked for the whole duration of
+        * the clone/dedupe operation, so we may deadlock if that happens and no
+        * other task releases enough space. So mark this inode as not
+        * flushable to avoid such a deadlock. We will clear that flag
+        * when we finish cloning all extents, since a transaction is started
+        * after finding each extent to clone.
+        */
+       set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);
+
        if (comp_type == BTRFS_COMPRESS_NONE) {
                char *map;
 
@@ -549,6 +562,8 @@ process_slot:
 out:
        btrfs_free_path(path);
        kvfree(buf);
+       clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);
+
        return ret;
 }
 
index 19b7db8b211719b33938dcefeaaa07640787509b..df63ef64c5c0dd3512eaa70aabcf0db8509abef6 100644 (file)
@@ -2975,11 +2975,16 @@ static int delete_v1_space_cache(struct extent_buffer *leaf,
                return 0;
 
        for (i = 0; i < btrfs_header_nritems(leaf); i++) {
+               u8 type;
+
                btrfs_item_key_to_cpu(leaf, &key, i);
                if (key.type != BTRFS_EXTENT_DATA_KEY)
                        continue;
                ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
-               if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_REG &&
+               type = btrfs_file_extent_type(leaf, ei);
+
+               if ((type == BTRFS_FILE_EXTENT_REG ||
+                    type == BTRFS_FILE_EXTENT_PREALLOC) &&
                    btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
                        found = true;
                        space_cache_ino = key.objectid;
index d719a2755a40d100043b0ad8c9aeaf460fc13f72..78a35374d492914d94004d65268bda8d4da54a01 100644 (file)
@@ -236,6 +236,7 @@ struct waiting_dir_move {
         * after this directory is moved, we can try to rmdir the ino rmdir_ino.
         */
        u64 rmdir_ino;
+       u64 rmdir_gen;
        bool orphanized;
 };
 
@@ -316,7 +317,7 @@ static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
 static struct waiting_dir_move *
 get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
 
-static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
+static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);
 
 static int need_send_hole(struct send_ctx *sctx)
 {
@@ -2299,7 +2300,7 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
 
                fs_path_reset(name);
 
-               if (is_waiting_for_rm(sctx, ino)) {
+               if (is_waiting_for_rm(sctx, ino, gen)) {
                        ret = gen_unique_name(sctx, ino, gen, name);
                        if (ret < 0)
                                goto out;
@@ -2858,8 +2859,8 @@ out:
        return ret;
 }
 
-static struct orphan_dir_info *
-add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
+static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx,
+                                                  u64 dir_ino, u64 dir_gen)
 {
        struct rb_node **p = &sctx->orphan_dirs.rb_node;
        struct rb_node *parent = NULL;
@@ -2868,20 +2869,23 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct orphan_dir_info, node);
-               if (dir_ino < entry->ino) {
+               if (dir_ino < entry->ino)
                        p = &(*p)->rb_left;
-               } else if (dir_ino > entry->ino) {
+               else if (dir_ino > entry->ino)
                        p = &(*p)->rb_right;
-               } else {
+               else if (dir_gen < entry->gen)
+                       p = &(*p)->rb_left;
+               else if (dir_gen > entry->gen)
+                       p = &(*p)->rb_right;
+               else
                        return entry;
-               }
        }
 
        odi = kmalloc(sizeof(*odi), GFP_KERNEL);
        if (!odi)
                return ERR_PTR(-ENOMEM);
        odi->ino = dir_ino;
-       odi->gen = 0;
+       odi->gen = dir_gen;
        odi->last_dir_index_offset = 0;
 
        rb_link_node(&odi->node, parent, p);
@@ -2889,8 +2893,8 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
        return odi;
 }
 
-static struct orphan_dir_info *
-get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
+static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx,
+                                                  u64 dir_ino, u64 gen)
 {
        struct rb_node *n = sctx->orphan_dirs.rb_node;
        struct orphan_dir_info *entry;
@@ -2901,15 +2905,19 @@ get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
                        n = n->rb_left;
                else if (dir_ino > entry->ino)
                        n = n->rb_right;
+               else if (gen < entry->gen)
+                       n = n->rb_left;
+               else if (gen > entry->gen)
+                       n = n->rb_right;
                else
                        return entry;
        }
        return NULL;
 }
 
-static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
+static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen)
 {
-       struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);
+       struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen);
 
        return odi != NULL;
 }
@@ -2954,7 +2962,7 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
        key.type = BTRFS_DIR_INDEX_KEY;
        key.offset = 0;
 
-       odi = get_orphan_dir_info(sctx, dir);
+       odi = get_orphan_dir_info(sctx, dir, dir_gen);
        if (odi)
                key.offset = odi->last_dir_index_offset;
 
@@ -2985,7 +2993,7 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
 
                dm = get_waiting_dir_move(sctx, loc.objectid);
                if (dm) {
-                       odi = add_orphan_dir_info(sctx, dir);
+                       odi = add_orphan_dir_info(sctx, dir, dir_gen);
                        if (IS_ERR(odi)) {
                                ret = PTR_ERR(odi);
                                goto out;
@@ -2993,12 +3001,13 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
                        odi->gen = dir_gen;
                        odi->last_dir_index_offset = found_key.offset;
                        dm->rmdir_ino = dir;
+                       dm->rmdir_gen = dir_gen;
                        ret = 0;
                        goto out;
                }
 
                if (loc.objectid > send_progress) {
-                       odi = add_orphan_dir_info(sctx, dir);
+                       odi = add_orphan_dir_info(sctx, dir, dir_gen);
                        if (IS_ERR(odi)) {
                                ret = PTR_ERR(odi);
                                goto out;
@@ -3038,6 +3047,7 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
                return -ENOMEM;
        dm->ino = ino;
        dm->rmdir_ino = 0;
+       dm->rmdir_gen = 0;
        dm->orphanized = orphanized;
 
        while (*p) {
@@ -3183,7 +3193,7 @@ static int path_loop(struct send_ctx *sctx, struct fs_path *name,
        while (ino != BTRFS_FIRST_FREE_OBJECTID) {
                fs_path_reset(name);
 
-               if (is_waiting_for_rm(sctx, ino))
+               if (is_waiting_for_rm(sctx, ino, gen))
                        break;
                if (is_waiting_for_move(sctx, ino)) {
                        if (*ancestor_ino == 0)
@@ -3223,6 +3233,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
        u64 parent_ino, parent_gen;
        struct waiting_dir_move *dm = NULL;
        u64 rmdir_ino = 0;
+       u64 rmdir_gen;
        u64 ancestor;
        bool is_orphan;
        int ret;
@@ -3237,6 +3248,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
        dm = get_waiting_dir_move(sctx, pm->ino);
        ASSERT(dm);
        rmdir_ino = dm->rmdir_ino;
+       rmdir_gen = dm->rmdir_gen;
        is_orphan = dm->orphanized;
        free_waiting_dir_move(sctx, dm);
 
@@ -3273,6 +3285,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
                        dm = get_waiting_dir_move(sctx, pm->ino);
                        ASSERT(dm);
                        dm->rmdir_ino = rmdir_ino;
+                       dm->rmdir_gen = rmdir_gen;
                }
                goto out;
        }
@@ -3291,7 +3304,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
                struct orphan_dir_info *odi;
                u64 gen;
 
-               odi = get_orphan_dir_info(sctx, rmdir_ino);
+               odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen);
                if (!odi) {
                        /* already deleted */
                        goto finish;
@@ -5499,6 +5512,21 @@ static int clone_range(struct send_ctx *sctx,
                        break;
                offset += clone_len;
                clone_root->offset += clone_len;
+
+               /*
+                * If we are cloning from the file we are currently processing,
+                * and using the send root as the clone root, we must stop once
+                * the current clone offset reaches the current eof of the file
+                * at the receiver, otherwise we would issue an invalid clone
+                * operation (source range going beyond eof) and cause the
+                * receiver to fail. So if we reach the current eof, bail out
+                * and fallback to a regular write.
+                */
+               if (clone_root->root == sctx->send_root &&
+                   clone_root->ino == sctx->cur_ino &&
+                   clone_root->offset >= sctx->cur_inode_next_write_offset)
+                       break;
+
                data_offset += clone_len;
 next:
                path->slots[0]++;
index 64099565ab8f5d2d038a6ff84b76501fde56e686..e8347461c8ddddeee4169dc8df66171b7f0dce7f 100644 (file)
@@ -532,7 +532,9 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
 
        loops = 0;
        while ((delalloc_bytes || dio_bytes) && loops < 3) {
-               btrfs_start_delalloc_roots(fs_info, items);
+               u64 nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
+
+               btrfs_start_delalloc_roots(fs_info, nr_pages, true);
 
                loops++;
                if (wait_ordered && !trans) {
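
The conversion from bytes to a page budget is a plain shift; as a worked
example (numbers illustrative, not from the patch):

    /* With 4 KiB pages (PAGE_SHIFT == 12), reclaiming 1 MiB against
     * 8 MiB of outstanding delalloc flushes at most
     * min(8 MiB, 1 MiB) >> 12 == (1 << 20) >> 12 == 256 pages. */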
index 022f2081008921fb9bab167d972c53360da7e35e..12d7d3be7cd456993405ab86fd16d92aa2a5a306 100644 (file)
@@ -175,7 +175,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
        btrfs_discard_stop(fs_info);
 
        /* btrfs handle error by forcing the filesystem readonly */
-       sb->s_flags |= SB_RDONLY;
+       btrfs_set_sb_rdonly(sb);
        btrfs_info(fs_info, "forced readonly");
        /*
         * Note that a running device replace operation is not canceled here
@@ -1953,7 +1953,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                /* avoid complains from lockdep et al. */
                up(&fs_info->uuid_tree_rescan_sem);
 
-               sb->s_flags |= SB_RDONLY;
+               btrfs_set_sb_rdonly(sb);
 
                /*
                 * Setting SB_RDONLY will put the cleaner thread to
@@ -1964,10 +1964,42 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                 */
                btrfs_delete_unused_bgs(fs_info);
 
+               /*
+                * The cleaner task could be already running before we set the
+                * flag BTRFS_FS_STATE_RO (and SB_RDONLY in the superblock).
+                * We must make sure that after we finish the remount, i.e. after
+                * we call btrfs_commit_super(), the cleaner can no longer start
+                * a transaction - either because it was dropping a dead root,
+                * running delayed iputs or deleting an unused block group (the
+                * cleaner picked a block group from the list of unused block
+                * groups before we were able to in the previous call to
+                * btrfs_delete_unused_bgs()).
+                */
+               wait_on_bit(&fs_info->flags, BTRFS_FS_CLEANER_RUNNING,
+                           TASK_UNINTERRUPTIBLE);
+
+               /*
+                * We've set the superblock to RO mode, so we might have made
+                * the cleaner task sleep without running all pending delayed
+                * iputs. Go through all the delayed iputs here, so that if an
+                * unmount happens without remounting RW we don't end up at
+                * finishing close_ctree() with a non-empty list of delayed
+                * iputs.
+                */
+               btrfs_run_delayed_iputs(fs_info);
+
                btrfs_dev_replace_suspend_for_unmount(fs_info);
                btrfs_scrub_cancel(fs_info);
                btrfs_pause_balance(fs_info);
 
+               /*
+                * Pause the qgroup rescan worker if it is running. We don't
+                * want it still running once we are in RO mode, because by the
+                * time we unmount it might have left a transaction open, and
+                * we would leak the transaction and/or crash.
+                */
+               btrfs_qgroup_wait_for_completion(fs_info, false);
+
                ret = btrfs_commit_super(fs_info);
                if (ret)
                        goto restore;
@@ -2006,7 +2038,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                if (ret)
                        goto restore;
 
-               sb->s_flags &= ~SB_RDONLY;
+               btrfs_clear_sb_rdonly(sb);
 
                set_bit(BTRFS_FS_OPEN, &fs_info->flags);
        }
@@ -2028,6 +2060,8 @@ restore:
        /* We've hit an error - don't reset SB_RDONLY */
        if (sb_rdonly(sb))
                old_flags |= SB_RDONLY;
+       if (!(old_flags & SB_RDONLY))
+               clear_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
        sb->s_flags = old_flags;
        fs_info->mount_opt = old_opts;
        fs_info->compress_type = old_compress_type;
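
The wait_on_bit() added above pairs with the cleaner-side change in
disk-io.c earlier in this diff, where clear_bit() became
clear_and_wake_up_bit(). The handshake, in outline (both calls taken
from the hunks above):

    /* Cleaner (disk-io.c): finish the pass, clear the bit and wake
     * anyone blocked in wait_on_bit(). */
    clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);

    /* Remount (super.c): after setting the RO state, block until no
     * cleaner pass is in flight before committing the super. */
    wait_on_bit(&fs_info->flags, BTRFS_FS_CLEANER_RUNNING,
                TASK_UNINTERRUPTIBLE);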
index 8ca334d554afb9cef5539d1a94af71d2468ac36c..6bd97bd4cb37114720393f252202844dd9653593 100644 (file)
@@ -55,8 +55,14 @@ struct inode *btrfs_new_test_inode(void)
        struct inode *inode;
 
        inode = new_inode(test_mnt->mnt_sb);
-       if (inode)
-               inode_init_owner(inode, NULL, S_IFREG);
+       if (!inode)
+               return NULL;
+
+       inode->i_mode = S_IFREG;
+       BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
+       BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
+       BTRFS_I(inode)->location.offset = 0;
+       inode_init_owner(inode, NULL, S_IFREG);
 
        return inode;
 }
index 04022069761deb90a2651d9eed9a94469268eb92..c9874b12d337c7e8323584178b0ce55b321fe34e 100644 (file)
@@ -232,11 +232,6 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
                return ret;
        }
 
-       inode->i_mode = S_IFREG;
-       BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
-       BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
-       BTRFS_I(inode)->location.offset = 0;
-
        fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
        if (!fs_info) {
                test_std_err(TEST_ALLOC_FS_INFO);
@@ -835,10 +830,6 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
                return ret;
        }
 
-       BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
-       BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
-       BTRFS_I(inode)->location.offset = 0;
-
        fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
        if (!fs_info) {
                test_std_err(TEST_ALLOC_FS_INFO);
index 8e0f7a1029c6c8002b08a781efd17be012330a1c..6af7f2bf92de7d18c834147b6fbc09eb0dc6f062 100644 (file)
@@ -2264,14 +2264,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
         */
        btrfs_free_log_root_tree(trans, fs_info);
 
-       /*
-        * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
-        * new delayed refs. Must handle them or qgroup can be wrong.
-        */
-       ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
-       if (ret)
-               goto unlock_tree_log;
-
        /*
         * Since fs roots are all committed, we can get a quite accurate
         * new_roots. So let's do quota accounting.
index 028e733e42f3b5fe01668b69e9c2a552722c231c..582061c7b54716e71563790beda87baa7fb26460 100644 (file)
@@ -760,6 +760,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
 {
        struct btrfs_fs_info *fs_info = leaf->fs_info;
        u64 length;
+       u64 chunk_end;
        u64 stripe_len;
        u16 num_stripes;
        u16 sub_stripes;
@@ -814,6 +815,12 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
                          "invalid chunk length, have %llu", length);
                return -EUCLEAN;
        }
+       if (unlikely(check_add_overflow(logical, length, &chunk_end))) {
+               chunk_err(leaf, chunk, logical,
+"invalid chunk logical start and length, have logical start %llu length %llu",
+                         logical, length);
+               return -EUCLEAN;
+       }
        if (unlikely(!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN)) {
                chunk_err(leaf, chunk, logical,
                          "invalid chunk stripe length: %llu",
index ee086fc56c30edbcb0e4529609e6519178da9409..0a6de859eb2226a2283076659fffe4afd22dd32b 100644 (file)
@@ -2592,7 +2592,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
        set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
 
        if (seeding_dev) {
-               sb->s_flags &= ~SB_RDONLY;
+               btrfs_clear_sb_rdonly(sb);
                ret = btrfs_prepare_sprout(fs_info);
                if (ret) {
                        btrfs_abort_transaction(trans, ret);
@@ -2728,7 +2728,7 @@ error_sysfs:
        mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 error_trans:
        if (seeding_dev)
-               sb->s_flags |= SB_RDONLY;
+               btrfs_set_sb_rdonly(sb);
        if (trans)
                btrfs_end_transaction(trans);
 error_free_zone:
@@ -4317,6 +4317,8 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
                btrfs_warn(fs_info,
        "balance: cannot set exclusive op status, resume manually");
 
+       btrfs_release_path(path);
+
        mutex_lock(&fs_info->balance_mutex);
        BUG_ON(fs_info->balance_ctl);
        spin_lock(&fs_info->balance_lock);
index 8bda092e60c5a00118ec8d5159f8a30bdcd38a04..e027c718ca01adea87dbd12bd5fa7b2945fb9c0c 100644 (file)
@@ -413,7 +413,6 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
 
        inode = d_backing_inode(object->backer);
        ASSERT(S_ISREG(inode->i_mode));
-       ASSERT(inode->i_mapping->a_ops->readpages);
 
        /* calculate the shift required to use bmap */
        shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
@@ -713,7 +712,6 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
 
        inode = d_backing_inode(object->backer);
        ASSERT(S_ISREG(inode->i_mode));
-       ASSERT(inode->i_mapping->a_ops->readpages);
 
        /* calculate the shift required to use bmap */
        shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
index 98c15ff2e599a46babba662464a53971d6111ee7..840587037b59bcceefd8a715482c05249c3d3e88 100644 (file)
@@ -2475,6 +2475,22 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
        return r;
 }
 
+static void encode_timestamp_and_gids(void **p,
+                                     const struct ceph_mds_request *req)
+{
+       struct ceph_timespec ts;
+       int i;
+
+       ceph_encode_timespec64(&ts, &req->r_stamp);
+       ceph_encode_copy(p, &ts, sizeof(ts));
+
+       /* gid_list */
+       ceph_encode_32(p, req->r_cred->group_info->ngroups);
+       for (i = 0; i < req->r_cred->group_info->ngroups; i++)
+               ceph_encode_64(p, from_kgid(&init_user_ns,
+                                           req->r_cred->group_info->gid[i]));
+}
+
 /*
  * called under mdsc->mutex
  */
@@ -2491,7 +2507,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
        u64 ino1 = 0, ino2 = 0;
        int pathlen1 = 0, pathlen2 = 0;
        bool freepath1 = false, freepath2 = false;
-       int len, i;
+       int len;
        u16 releases;
        void *p, *end;
        int ret;
@@ -2517,17 +2533,10 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
                goto out_free1;
        }
 
-       if (legacy) {
-               /* Old style */
-               len = sizeof(*head);
-       } else {
-               /* New style: add gid_list and any later fields */
-               len = sizeof(struct ceph_mds_request_head) + sizeof(u32) +
-                     (sizeof(u64) * req->r_cred->group_info->ngroups);
-       }
-
+       len = legacy ? sizeof(*head) : sizeof(struct ceph_mds_request_head);
        len += pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
                sizeof(struct ceph_timespec);
+       len += sizeof(u32) + (sizeof(u64) * req->r_cred->group_info->ngroups);
 
        /* calculate (max) length for cap releases */
        len += sizeof(struct ceph_mds_request_release) *
@@ -2548,7 +2557,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
        msg->hdr.tid = cpu_to_le64(req->r_tid);
 
        /*
-        * The old ceph_mds_request_header didn't contain a version field, and
+        * The old ceph_mds_request_head didn't contain a version field, and
         * one was added when we moved the message version from 3->4.
         */
        if (legacy) {
@@ -2609,20 +2618,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
 
        head->num_releases = cpu_to_le16(releases);
 
-       /* time stamp */
-       {
-               struct ceph_timespec ts;
-               ceph_encode_timespec64(&ts, &req->r_stamp);
-               ceph_encode_copy(&p, &ts, sizeof(ts));
-       }
-
-       /* gid list */
-       if (!legacy) {
-               ceph_encode_32(&p, req->r_cred->group_info->ngroups);
-               for (i = 0; i < req->r_cred->group_info->ngroups; i++)
-                       ceph_encode_64(&p, from_kgid(&init_user_ns,
-                                      req->r_cred->group_info->gid[i]));
-       }
+       encode_timestamp_and_gids(&p, req);
 
        if (WARN_ON_ONCE(p > end)) {
                ceph_msg_put(msg);
@@ -2730,13 +2726,8 @@ static int __prepare_send_request(struct ceph_mds_session *session,
                /* remove cap/dentry releases from message */
                rhead->num_releases = 0;
 
-               /* time stamp */
                p = msg->front.iov_base + req->r_request_release_offset;
-               {
-                       struct ceph_timespec ts;
-                       ceph_encode_timespec64(&ts, &req->r_stamp);
-                       ceph_encode_copy(&p, &ts, sizeof(ts));
-               }
+               encode_timestamp_and_gids(&p, req);
 
                msg->front.iov_len = p - msg->front.iov_base;
                msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
index b9df85506938d55fa86ac2ceaf19ad61ad188b43..5d39129406ea66668a62a4d9d990bb89f8ad3d49 100644 (file)
@@ -3740,7 +3740,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
 
        if (!ses->binding) {
                ses->capabilities = server->capabilities;
-               if (linuxExtEnabled == 0)
+               if (!linuxExtEnabled)
                        ses->capabilities &= (~server->vals->cap_unix);
 
                if (ses->auth_key.response) {
index 6ad6ba5f6ebeef20bcf68f2d3ae27e432dcd4d04..0fdb0de7ff861b89e90ebc43b40921f79411eef2 100644 (file)
@@ -1260,7 +1260,8 @@ void dfs_cache_del_vol(const char *fullpath)
        vi = find_vol(fullpath);
        spin_unlock(&vol_list_lock);
 
-       kref_put(&vi->refcnt, vol_release);
+       if (!IS_ERR(vi))
+               kref_put(&vi->refcnt, vol_release);
 }
 
 /**
index 0afccbbed2e65363ef029eb6c5caf3af2cb77a70..076bcadc756a751ffaa43b55a107404b33b203fa 100644 (file)
@@ -303,8 +303,6 @@ do {                                                                        \
 int
 smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx)
 {
-       int rc = 0;
-
        memcpy(new_ctx, ctx, sizeof(*ctx));
        new_ctx->prepath = NULL;
        new_ctx->mount_options = NULL;
@@ -327,7 +325,7 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
        DUP_CTX_STR(nodename);
        DUP_CTX_STR(iocharset);
 
-       return rc;
+       return 0;
 }
 
 static int
index 067eb44c7baa863c1e7ccd2c2f599be0b067f320..794fc3b68b4f96905eef5c0d6e27b03656753c1b 100644 (file)
@@ -3248,7 +3248,7 @@ close_exit:
        free_rsp_buf(resp_buftype, rsp);
 
        /* retry close in a worker thread if this one is interrupted */
-       if (rc == -EINTR) {
+       if (is_interrupt_error(rc)) {
                int tmp_rc;
 
                tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
index 204a622b89ed3575d3e5cda23e115341615afb7a..d85edf5d1429418728d9fa44fac2a2d9a6df7684 100644 (file)
@@ -424,7 +424,7 @@ struct smb2_rdma_transform_capabilities_context {
        __le16  TransformCount;
        __u16   Reserved1;
        __u32   Reserved2;
-       __le16  RDMATransformIds[1];
+       __le16  RDMATransformIds[];
 } __packed;
 
 /* Signing algorithms */
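
Changing RDMATransformIds from a one-element array to a C99 flexible array
member alters sizeof() of the containing struct, so allocation sites must
size the trailing elements explicitly. A general sketch of the kernel idiom
for such structs (num_ids is a hypothetical count, not taken from this diff):

	struct smb2_rdma_transform_capabilities_context *ctxt;

	/* struct_size() = sizeof(*ctxt) + num_ids * sizeof(__le16) */
	ctxt = kzalloc(struct_size(ctxt, RDMATransformIds, num_ids), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;
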
index 1a0a827a7f34578984f6185c9d235d4248bae8a7..be799040a4154ab075a7aeb104309730ce8ddc8e 100644 (file)
@@ -372,20 +372,3 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
        }
        return err;
 }
-
-int __ext4_handle_dirty_super(const char *where, unsigned int line,
-                             handle_t *handle, struct super_block *sb)
-{
-       struct buffer_head *bh = EXT4_SB(sb)->s_sbh;
-       int err = 0;
-
-       ext4_superblock_csum_set(sb);
-       if (ext4_handle_valid(handle)) {
-               err = jbd2_journal_dirty_metadata(handle, bh);
-               if (err)
-                       ext4_journal_abort_handle(where, line, __func__,
-                                                 bh, handle, err);
-       } else
-               mark_buffer_dirty(bh);
-       return err;
-}
index a124c68b0c75e10938578dab8f257c09e70cff86..0d2fa423b7adbea8e4e12eec74552ee71884b861 100644 (file)
@@ -244,9 +244,6 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
                                 handle_t *handle, struct inode *inode,
                                 struct buffer_head *bh);
 
-int __ext4_handle_dirty_super(const char *where, unsigned int line,
-                             handle_t *handle, struct super_block *sb);
-
 #define ext4_journal_get_write_access(handle, bh) \
        __ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh))
 #define ext4_forget(handle, is_metadata, inode, bh, block_nr) \
@@ -257,8 +254,6 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
 #define ext4_handle_dirty_metadata(handle, inode, bh) \
        __ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \
                                     (bh))
-#define ext4_handle_dirty_super(handle, sb) \
-       __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb))
 
 handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
                                  int type, int blocks, int rsv_blocks,
index 4fcc21c25e79399c8b3ae9069d93d063e78d50d2..0a14a7c87bf82e3b20748cc627fb40d03dad6268 100644 (file)
@@ -604,13 +604,13 @@ void ext4_fc_track_range(handle_t *handle, struct inode *inode, ext4_lblk_t star
        trace_ext4_fc_track_range(inode, start, end, ret);
 }
 
-static void ext4_fc_submit_bh(struct super_block *sb)
+static void ext4_fc_submit_bh(struct super_block *sb, bool is_tail)
 {
        int write_flags = REQ_SYNC;
        struct buffer_head *bh = EXT4_SB(sb)->s_fc_bh;
 
-       /* TODO: REQ_FUA | REQ_PREFLUSH is unnecessarily expensive. */
-       if (test_opt(sb, BARRIER))
+       /* Add REQ_FUA | REQ_PREFLUSH only for the tail block */
+       if (test_opt(sb, BARRIER) && is_tail)
                write_flags |= REQ_FUA | REQ_PREFLUSH;
        lock_buffer(bh);
        set_buffer_dirty(bh);
@@ -684,7 +684,7 @@ static u8 *ext4_fc_reserve_space(struct super_block *sb, int len, u32 *crc)
                *crc = ext4_chksum(sbi, *crc, tl, sizeof(*tl));
        if (pad_len > 0)
                ext4_fc_memzero(sb, tl + 1, pad_len, crc);
-       ext4_fc_submit_bh(sb);
+       ext4_fc_submit_bh(sb, false);
 
        ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh);
        if (ret)
@@ -741,7 +741,7 @@ static int ext4_fc_write_tail(struct super_block *sb, u32 crc)
        tail.fc_crc = cpu_to_le32(crc);
        ext4_fc_memcpy(sb, dst, &tail.fc_crc, sizeof(tail.fc_crc), NULL);
 
-       ext4_fc_submit_bh(sb);
+       ext4_fc_submit_bh(sb, true);
 
        return 0;
 }
@@ -1268,7 +1268,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full)
        list_splice_init(&sbi->s_fc_dentry_q[FC_Q_STAGING],
                                &sbi->s_fc_dentry_q[FC_Q_MAIN]);
        list_splice_init(&sbi->s_fc_q[FC_Q_STAGING],
-                               &sbi->s_fc_q[FC_Q_STAGING]);
+                               &sbi->s_fc_q[FC_Q_MAIN]);
 
        ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING);
        ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
@@ -1318,14 +1318,14 @@ static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl)
        entry.len = darg.dname_len;
        inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
 
-       if (IS_ERR_OR_NULL(inode)) {
+       if (IS_ERR(inode)) {
                jbd_debug(1, "Inode %d not found", darg.ino);
                return 0;
        }
 
        old_parent = ext4_iget(sb, darg.parent_ino,
                                EXT4_IGET_NORMAL);
-       if (IS_ERR_OR_NULL(old_parent)) {
+       if (IS_ERR(old_parent)) {
                jbd_debug(1, "Dir with inode  %d not found", darg.parent_ino);
                iput(inode);
                return 0;
@@ -1410,7 +1410,7 @@ static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl)
                        darg.parent_ino, darg.dname_len);
 
        inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
-       if (IS_ERR_OR_NULL(inode)) {
+       if (IS_ERR(inode)) {
                jbd_debug(1, "Inode not found.");
                return 0;
        }
@@ -1466,10 +1466,11 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
        trace_ext4_fc_replay(sb, tag, ino, 0, 0);
 
        inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
-       if (!IS_ERR_OR_NULL(inode)) {
+       if (!IS_ERR(inode)) {
                ext4_ext_clear_bb(inode);
                iput(inode);
        }
+       inode = NULL;
 
        ext4_fc_record_modified_inode(sb, ino);
 
@@ -1512,7 +1513,7 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
 
        /* Given that we just wrote the inode on disk, this SHOULD succeed. */
        inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
-       if (IS_ERR_OR_NULL(inode)) {
+       if (IS_ERR(inode)) {
                jbd_debug(1, "Inode not found.");
                return -EFSCORRUPTED;
        }
@@ -1564,7 +1565,7 @@ static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl)
                goto out;
 
        inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
-       if (IS_ERR_OR_NULL(inode)) {
+       if (IS_ERR(inode)) {
                jbd_debug(1, "inode %d not found.", darg.ino);
                inode = NULL;
                ret = -EINVAL;
@@ -1577,7 +1578,7 @@ static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl)
                 * dot and dot dot dirents are setup properly.
                 */
                dir = ext4_iget(sb, darg.parent_ino, EXT4_IGET_NORMAL);
-               if (IS_ERR_OR_NULL(dir)) {
+               if (IS_ERR(dir)) {
                        jbd_debug(1, "Dir %d not found.", darg.ino);
                        goto out;
                }
@@ -1653,7 +1654,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
 
        inode = ext4_iget(sb, le32_to_cpu(fc_add_ex->fc_ino),
                                EXT4_IGET_NORMAL);
-       if (IS_ERR_OR_NULL(inode)) {
+       if (IS_ERR(inode)) {
                jbd_debug(1, "Inode not found.");
                return 0;
        }
@@ -1777,7 +1778,7 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
                le32_to_cpu(lrange->fc_ino), cur, remaining);
 
        inode = ext4_iget(sb, le32_to_cpu(lrange->fc_ino), EXT4_IGET_NORMAL);
-       if (IS_ERR_OR_NULL(inode)) {
+       if (IS_ERR(inode)) {
                jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange->fc_ino));
                return 0;
        }
@@ -1832,7 +1833,7 @@ static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb)
        for (i = 0; i < state->fc_modified_inodes_used; i++) {
                inode = ext4_iget(sb, state->fc_modified_inodes[i],
                        EXT4_IGET_NORMAL);
-               if (IS_ERR_OR_NULL(inode)) {
+               if (IS_ERR(inode)) {
                        jbd_debug(1, "Inode %d not found.",
                                state->fc_modified_inodes[i]);
                        continue;
@@ -1849,7 +1850,7 @@ static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb)
 
                        if (ret > 0) {
                                path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
-                               if (!IS_ERR_OR_NULL(path)) {
+                               if (!IS_ERR(path)) {
                                        for (j = 0; j < path->p_depth; j++)
                                                ext4_mb_mark_bb(inode->i_sb,
                                                        path[j].p_block, 1, 1);
index 3ed8c048fb12c547775850ba62d364bc08362c20..349b27f0dda0cba64cc10575bb233b9b52de2bd4 100644 (file)
@@ -809,9 +809,12 @@ static int ext4_sample_last_mounted(struct super_block *sb,
        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
        if (err)
                goto out_journal;
-       strlcpy(sbi->s_es->s_last_mounted, cp,
+       lock_buffer(sbi->s_sbh);
+       strncpy(sbi->s_es->s_last_mounted, cp,
                sizeof(sbi->s_es->s_last_mounted));
-       ext4_handle_dirty_super(handle, sb);
+       ext4_superblock_csum_set(sb);
+       unlock_buffer(sbi->s_sbh);
+       ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
 out_journal:
        ext4_journal_stop(handle);
 out:
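
This hunk, and several ext4 hunks below, open-code what the removed
ext4_handle_dirty_super() used to do, now holding the buffer lock across the
field update and checksum recalculation. The shape shared by these call
sites, distilled into a sketch (not a new helper introduced by the diff):

	lock_buffer(sbi->s_sbh);
	/* ... modify fields of sbi->s_es under the buffer lock ... */
	ext4_superblock_csum_set(sb);	/* recompute csum while still locked */
	unlock_buffer(sbi->s_sbh);
	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
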
index 27946882d4ce45b7b544bce1b28a26eef006cd64..c173c840585618b0be404937154dd8516eccf60a 100644 (file)
@@ -5150,9 +5150,13 @@ static int ext4_do_update_inode(handle_t *handle,
                err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
                if (err)
                        goto out_brelse;
+               lock_buffer(EXT4_SB(sb)->s_sbh);
                ext4_set_feature_large_file(sb);
+               ext4_superblock_csum_set(sb);
+               unlock_buffer(EXT4_SB(sb)->s_sbh);
                ext4_handle_sync(handle);
-               err = ext4_handle_dirty_super(handle, sb);
+               err = ext4_handle_dirty_metadata(handle, NULL,
+                                                EXT4_SB(sb)->s_sbh);
        }
        ext4_update_inode_fsync_trans(handle, inode, need_datasync);
 out_brelse:
index 524e134324475e4fffd769b30e5b13bc5cb258f9..d9665d2f82db845175e290b70d1ea7cb26f9b938 100644 (file)
@@ -1157,7 +1157,10 @@ resizefs_out:
                        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
                        if (err)
                                goto pwsalt_err_journal;
+                       lock_buffer(sbi->s_sbh);
                        generate_random_uuid(sbi->s_es->s_encrypt_pw_salt);
+                       ext4_superblock_csum_set(sb);
+                       unlock_buffer(sbi->s_sbh);
                        err = ext4_handle_dirty_metadata(handle, NULL,
                                                         sbi->s_sbh);
                pwsalt_err_journal:
index b17a082b7db15298e99813cb22d07fd4d0c2e458..cf652ba3e74d29f122b960bab6dc2aa93bb4945c 100644 (file)
@@ -2976,14 +2976,17 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
            (le32_to_cpu(sbi->s_es->s_inodes_count))) {
                /* Insert this inode at the head of the on-disk orphan list */
                NEXT_ORPHAN(inode) = le32_to_cpu(sbi->s_es->s_last_orphan);
+               lock_buffer(sbi->s_sbh);
                sbi->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
+               ext4_superblock_csum_set(sb);
+               unlock_buffer(sbi->s_sbh);
                dirty = true;
        }
        list_add(&EXT4_I(inode)->i_orphan, &sbi->s_orphan);
        mutex_unlock(&sbi->s_orphan_lock);
 
        if (dirty) {
-               err = ext4_handle_dirty_super(handle, sb);
+               err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
                rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
                if (!err)
                        err = rc;
@@ -3059,9 +3062,12 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
                        mutex_unlock(&sbi->s_orphan_lock);
                        goto out_brelse;
                }
+               lock_buffer(sbi->s_sbh);
                sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
+               ext4_superblock_csum_set(inode->i_sb);
+               unlock_buffer(sbi->s_sbh);
                mutex_unlock(&sbi->s_orphan_lock);
-               err = ext4_handle_dirty_super(handle, inode->i_sb);
+               err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
        } else {
                struct ext4_iloc iloc2;
                struct inode *i_prev =
@@ -3593,9 +3599,6 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
                        return retval2;
                }
        }
-       brelse(ent->bh);
-       ent->bh = NULL;
-
        return retval;
 }
 
@@ -3794,6 +3797,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                }
        }
 
+       old_file_type = old.de->file_type;
        if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
                ext4_handle_sync(handle);
 
@@ -3821,7 +3825,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
        force_reread = (new.dir->i_ino == old.dir->i_ino &&
                        ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA));
 
-       old_file_type = old.de->file_type;
        if (whiteout) {
                /*
                 * Do this before adding a new entry, so the old entry is sure
@@ -3919,15 +3922,19 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
        retval = 0;
 
 end_rename:
-       brelse(old.dir_bh);
-       brelse(old.bh);
-       brelse(new.bh);
        if (whiteout) {
-               if (retval)
+               if (retval) {
+                       ext4_setent(handle, &old,
+                               old.inode->i_ino, old_file_type);
                        drop_nlink(whiteout);
+               }
                unlock_new_inode(whiteout);
                iput(whiteout);
+
        }
+       brelse(old.dir_bh);
+       brelse(old.bh);
+       brelse(new.bh);
        if (handle)
                ext4_journal_stop(handle);
        return retval;
index 928700d57eb67e5bc01340af2dd8826a6e9c6718..bd0d185654f3357cdc7a5826a6d3afa87c052dff 100644 (file)
@@ -899,8 +899,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
        EXT4_SB(sb)->s_gdb_count++;
        ext4_kvfree_array_rcu(o_group_desc);
 
+       lock_buffer(EXT4_SB(sb)->s_sbh);
        le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
-       err = ext4_handle_dirty_super(handle, sb);
+       ext4_superblock_csum_set(sb);
+       unlock_buffer(EXT4_SB(sb)->s_sbh);
+       err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
        if (err)
                ext4_std_error(sb, err);
        return err;
@@ -1384,6 +1387,7 @@ static void ext4_update_super(struct super_block *sb,
        reserved_blocks *= blocks_count;
        do_div(reserved_blocks, 100);
 
+       lock_buffer(sbi->s_sbh);
        ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
        ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
        le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
@@ -1421,6 +1425,8 @@ static void ext4_update_super(struct super_block *sb,
         * active. */
        ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
                                reserved_blocks);
+       ext4_superblock_csum_set(sb);
+       unlock_buffer(sbi->s_sbh);
 
        /* Update the free space counts */
        percpu_counter_add(&sbi->s_freeclusters_counter,
@@ -1515,7 +1521,7 @@ static int ext4_flex_group_add(struct super_block *sb,
 
        ext4_update_super(sb, flex_gd);
 
-       err = ext4_handle_dirty_super(handle, sb);
+       err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
 
 exit_journal:
        err2 = ext4_journal_stop(handle);
@@ -1717,15 +1723,18 @@ static int ext4_group_extend_no_check(struct super_block *sb,
                goto errout;
        }
 
+       lock_buffer(EXT4_SB(sb)->s_sbh);
        ext4_blocks_count_set(es, o_blocks_count + add);
        ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
+       ext4_superblock_csum_set(sb);
+       unlock_buffer(EXT4_SB(sb)->s_sbh);
        ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
                   o_blocks_count + add);
        /* We add the blocks to the bitmap and set the group need init bit */
        err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
        if (err)
                goto errout;
-       ext4_handle_dirty_super(handle, sb);
+       ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
        ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
                   o_blocks_count + add);
 errout:
@@ -1874,12 +1883,15 @@ static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
        if (err)
                goto errout;
 
+       lock_buffer(sbi->s_sbh);
        ext4_clear_feature_resize_inode(sb);
        ext4_set_feature_meta_bg(sb);
        sbi->s_es->s_first_meta_bg =
                cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
+       ext4_superblock_csum_set(sb);
+       unlock_buffer(sbi->s_sbh);
 
-       err = ext4_handle_dirty_super(handle, sb);
+       err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
        if (err) {
                ext4_std_error(sb, err);
                goto errout;
index 21121787c874e186ed547177adcffdebbd3891a6..9a6f9875aa3499fb7ac3a48a062f933b162aa2b9 100644 (file)
@@ -65,7 +65,8 @@ static struct ratelimit_state ext4_mount_msg_ratelimit;
 static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
                             unsigned long journal_devnum);
 static int ext4_show_options(struct seq_file *seq, struct dentry *root);
-static int ext4_commit_super(struct super_block *sb, int sync);
+static void ext4_update_super(struct super_block *sb);
+static int ext4_commit_super(struct super_block *sb);
 static int ext4_mark_recovery_complete(struct super_block *sb,
                                        struct ext4_super_block *es);
 static int ext4_clear_journal_err(struct super_block *sb,
@@ -586,15 +587,12 @@ static int ext4_errno_to_code(int errno)
        return EXT4_ERR_UNKNOWN;
 }
 
-static void __save_error_info(struct super_block *sb, int error,
-                             __u32 ino, __u64 block,
-                             const char *func, unsigned int line)
+static void save_error_info(struct super_block *sb, int error,
+                           __u32 ino, __u64 block,
+                           const char *func, unsigned int line)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
 
-       EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
-       if (bdev_read_only(sb->s_bdev))
-               return;
        /* We default to EFSCORRUPTED error... */
        if (error == 0)
                error = EFSCORRUPTED;
@@ -618,15 +616,6 @@ static void __save_error_info(struct super_block *sb, int error,
        spin_unlock(&sbi->s_error_lock);
 }
 
-static void save_error_info(struct super_block *sb, int error,
-                           __u32 ino, __u64 block,
-                           const char *func, unsigned int line)
-{
-       __save_error_info(sb, error, ino, block, func, line);
-       if (!bdev_read_only(sb->s_bdev))
-               ext4_commit_super(sb, 1);
-}
-
 /* Deal with the reporting of failure conditions on a filesystem such as
  * inconsistencies detected or read IO failures.
  *
@@ -647,19 +636,40 @@ static void save_error_info(struct super_block *sb, int error,
  * used to deal with unrecoverable failures such as journal IO errors or ENOMEM
  * at a critical moment in log management.
  */
-static void ext4_handle_error(struct super_block *sb, bool force_ro)
+static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
+                             __u32 ino, __u64 block,
+                             const char *func, unsigned int line)
 {
        journal_t *journal = EXT4_SB(sb)->s_journal;
+       bool continue_fs = !force_ro && test_opt(sb, ERRORS_CONT);
 
+       EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
        if (test_opt(sb, WARN_ON_ERROR))
                WARN_ON_ONCE(1);
 
-       if (sb_rdonly(sb) || (!force_ro && test_opt(sb, ERRORS_CONT)))
+       if (!continue_fs && !sb_rdonly(sb)) {
+               ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
+               if (journal)
+                       jbd2_journal_abort(journal, -EIO);
+       }
+
+       if (!bdev_read_only(sb->s_bdev)) {
+               save_error_info(sb, error, ino, block, func, line);
+               /*
+                * If the fs should keep running, we need to write out the
+                * superblock through the journal. Due to lock ordering
+                * constraints, that may not be safe to do right here, so we
+                * defer superblock flushing to a workqueue.
+                */
+               if (continue_fs)
+                       schedule_work(&EXT4_SB(sb)->s_error_work);
+               else
+                       ext4_commit_super(sb);
+       }
+
+       if (sb_rdonly(sb) || continue_fs)
                return;
 
-       ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
-       if (journal)
-               jbd2_journal_abort(journal, -EIO);
        /*
         * We force ERRORS_RO behavior when system is rebooting. Otherwise we
         * could panic during 'reboot -f' as the underlying device got already
@@ -682,8 +692,39 @@ static void flush_stashed_error_work(struct work_struct *work)
 {
        struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info,
                                                s_error_work);
+       journal_t *journal = sbi->s_journal;
+       handle_t *handle;
 
-       ext4_commit_super(sbi->s_sb, 1);
+       /*
+        * If the journal is still running, we have to write out the
+        * superblock through the journal to avoid collisions with other
+        * journalled sb updates.
+        *
+        * We use jbd2 functions directly here to avoid recursing back into
+        * the ext4 error handling code while handling previous errors.
+        */
+       if (!sb_rdonly(sbi->s_sb) && journal) {
+               handle = jbd2_journal_start(journal, 1);
+               if (IS_ERR(handle))
+                       goto write_directly;
+               if (jbd2_journal_get_write_access(handle, sbi->s_sbh)) {
+                       jbd2_journal_stop(handle);
+                       goto write_directly;
+               }
+               ext4_update_super(sbi->s_sb);
+               if (jbd2_journal_dirty_metadata(handle, sbi->s_sbh)) {
+                       jbd2_journal_stop(handle);
+                       goto write_directly;
+               }
+               jbd2_journal_stop(handle);
+               return;
+       }
+write_directly:
+       /*
+        * Writing through the journal failed. Write the sb directly to get
+        * the error info out and hope for the best.
+        */
+       ext4_commit_super(sbi->s_sb);
 }
 
 #define ext4_error_ratelimit(sb)                                       \
@@ -710,8 +751,7 @@ void __ext4_error(struct super_block *sb, const char *function,
                       sb->s_id, function, line, current->comm, &vaf);
                va_end(args);
        }
-       save_error_info(sb, error, 0, block, function, line);
-       ext4_handle_error(sb, force_ro);
+       ext4_handle_error(sb, force_ro, error, 0, block, function, line);
 }
 
 void __ext4_error_inode(struct inode *inode, const char *function,
@@ -741,9 +781,8 @@ void __ext4_error_inode(struct inode *inode, const char *function,
                               current->comm, &vaf);
                va_end(args);
        }
-       save_error_info(inode->i_sb, error, inode->i_ino, block,
-                       function, line);
-       ext4_handle_error(inode->i_sb, false);
+       ext4_handle_error(inode->i_sb, false, error, inode->i_ino, block,
+                         function, line);
 }
 
 void __ext4_error_file(struct file *file, const char *function,
@@ -780,9 +819,8 @@ void __ext4_error_file(struct file *file, const char *function,
                               current->comm, path, &vaf);
                va_end(args);
        }
-       save_error_info(inode->i_sb, EFSCORRUPTED, inode->i_ino, block,
-                       function, line);
-       ext4_handle_error(inode->i_sb, false);
+       ext4_handle_error(inode->i_sb, false, EFSCORRUPTED, inode->i_ino, block,
+                         function, line);
 }
 
 const char *ext4_decode_error(struct super_block *sb, int errno,
@@ -849,8 +887,7 @@ void __ext4_std_error(struct super_block *sb, const char *function,
                       sb->s_id, function, line, errstr);
        }
 
-       save_error_info(sb, -errno, 0, 0, function, line);
-       ext4_handle_error(sb, false);
+       ext4_handle_error(sb, false, -errno, 0, 0, function, line);
 }
 
 void __ext4_msg(struct super_block *sb,
@@ -944,13 +981,16 @@ __acquires(bitlock)
        if (test_opt(sb, ERRORS_CONT)) {
                if (test_opt(sb, WARN_ON_ERROR))
                        WARN_ON_ONCE(1);
-               __save_error_info(sb, EFSCORRUPTED, ino, block, function, line);
-               schedule_work(&EXT4_SB(sb)->s_error_work);
+               EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
+               if (!bdev_read_only(sb->s_bdev)) {
+                       save_error_info(sb, EFSCORRUPTED, ino, block, function,
+                                       line);
+                       schedule_work(&EXT4_SB(sb)->s_error_work);
+               }
                return;
        }
        ext4_unlock_group(sb, grp);
-       save_error_info(sb, EFSCORRUPTED, ino, block, function, line);
-       ext4_handle_error(sb, false);
+       ext4_handle_error(sb, false, EFSCORRUPTED, ino, block, function, line);
        /*
         * We only get here in the ERRORS_RO case; relocking the group
         * may be dangerous, but nothing bad will happen since the
@@ -1152,7 +1192,7 @@ static void ext4_put_super(struct super_block *sb)
                es->s_state = cpu_to_le16(sbi->s_mount_state);
        }
        if (!sb_rdonly(sb))
-               ext4_commit_super(sb, 1);
+               ext4_commit_super(sb);
 
        rcu_read_lock();
        group_desc = rcu_dereference(sbi->s_group_desc);
@@ -2642,7 +2682,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
        if (sbi->s_journal)
                ext4_set_feature_journal_needs_recovery(sb);
 
-       err = ext4_commit_super(sb, 1);
+       err = ext4_commit_super(sb);
 done:
        if (test_opt(sb, DEBUG))
                printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
@@ -4868,7 +4908,7 @@ no_journal:
        if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
            !ext4_has_feature_encrypt(sb)) {
                ext4_set_feature_encrypt(sb);
-               ext4_commit_super(sb, 1);
+               ext4_commit_super(sb);
        }
 
        /*
@@ -5418,7 +5458,7 @@ static int ext4_load_journal(struct super_block *sb,
                es->s_journal_dev = cpu_to_le32(journal_devnum);
 
                /* Make sure we flush the recovery flag to disk. */
-               ext4_commit_super(sb, 1);
+               ext4_commit_super(sb);
        }
 
        return 0;
@@ -5428,16 +5468,14 @@ err_out:
        return err;
 }
 
-static int ext4_commit_super(struct super_block *sb, int sync)
+/* Copy state of EXT4_SB(sb) into buffer for on-disk superblock */
+static void ext4_update_super(struct super_block *sb)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
-       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
-       struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
-       int error = 0;
-
-       if (!sbh || block_device_ejected(sb))
-               return error;
+       struct ext4_super_block *es = sbi->s_es;
+       struct buffer_head *sbh = sbi->s_sbh;
 
+       lock_buffer(sbh);
        /*
         * If the file system is mounted read-only, don't update the
         * superblock write time.  This avoids updating the superblock
@@ -5451,17 +5489,17 @@ static int ext4_commit_super(struct super_block *sb, int sync)
        if (!(sb->s_flags & SB_RDONLY))
                ext4_update_tstamp(es, s_wtime);
        es->s_kbytes_written =
-               cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
+               cpu_to_le64(sbi->s_kbytes_written +
                    ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
-                     EXT4_SB(sb)->s_sectors_written_start) >> 1));
-       if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
+                     sbi->s_sectors_written_start) >> 1));
+       if (percpu_counter_initialized(&sbi->s_freeclusters_counter))
                ext4_free_blocks_count_set(es,
-                       EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
-                               &EXT4_SB(sb)->s_freeclusters_counter)));
-       if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
+                       EXT4_C2B(sbi, percpu_counter_sum_positive(
+                               &sbi->s_freeclusters_counter)));
+       if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
                es->s_free_inodes_count =
                        cpu_to_le32(percpu_counter_sum_positive(
-                               &EXT4_SB(sb)->s_freeinodes_counter));
+                               &sbi->s_freeinodes_counter));
        /* Copy error information to the on-disk superblock */
        spin_lock(&sbi->s_error_lock);
        if (sbi->s_add_error_count > 0) {
@@ -5502,10 +5540,20 @@ static int ext4_commit_super(struct super_block *sb, int sync)
        }
        spin_unlock(&sbi->s_error_lock);
 
-       BUFFER_TRACE(sbh, "marking dirty");
        ext4_superblock_csum_set(sb);
-       if (sync)
-               lock_buffer(sbh);
+       unlock_buffer(sbh);
+}
+
+static int ext4_commit_super(struct super_block *sb)
+{
+       struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
+       int error = 0;
+
+       if (!sbh || block_device_ejected(sb))
+               return error;
+
+       ext4_update_super(sb);
+
        if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
                /*
                 * Oh, dear.  A previous attempt to write the
@@ -5520,17 +5568,15 @@ static int ext4_commit_super(struct super_block *sb, int sync)
                clear_buffer_write_io_error(sbh);
                set_buffer_uptodate(sbh);
        }
+       BUFFER_TRACE(sbh, "marking dirty");
        mark_buffer_dirty(sbh);
-       if (sync) {
-               unlock_buffer(sbh);
-               error = __sync_dirty_buffer(sbh,
-                       REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
-               if (buffer_write_io_error(sbh)) {
-                       ext4_msg(sb, KERN_ERR, "I/O error while writing "
-                              "superblock");
-                       clear_buffer_write_io_error(sbh);
-                       set_buffer_uptodate(sbh);
-               }
+       error = __sync_dirty_buffer(sbh,
+               REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
+       if (buffer_write_io_error(sbh)) {
+               ext4_msg(sb, KERN_ERR, "I/O error while writing "
+                      "superblock");
+               clear_buffer_write_io_error(sbh);
+               set_buffer_uptodate(sbh);
        }
        return error;
 }
@@ -5561,7 +5607,7 @@ static int ext4_mark_recovery_complete(struct super_block *sb,
 
        if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
                ext4_clear_feature_journal_needs_recovery(sb);
-               ext4_commit_super(sb, 1);
+               ext4_commit_super(sb);
        }
 out:
        jbd2_journal_unlock_updates(journal);
@@ -5603,7 +5649,7 @@ static int ext4_clear_journal_err(struct super_block *sb,
 
                EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
                es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
-               ext4_commit_super(sb, 1);
+               ext4_commit_super(sb);
 
                jbd2_journal_clear_err(journal);
                jbd2_journal_update_sb_errno(journal);
@@ -5705,7 +5751,7 @@ static int ext4_freeze(struct super_block *sb)
                ext4_clear_feature_journal_needs_recovery(sb);
        }
 
-       error = ext4_commit_super(sb, 1);
+       error = ext4_commit_super(sb);
 out:
        if (journal)
                /* we rely on upper layer to stop further updates */
@@ -5727,7 +5773,7 @@ static int ext4_unfreeze(struct super_block *sb)
                ext4_set_feature_journal_needs_recovery(sb);
        }
 
-       ext4_commit_super(sb, 1);
+       ext4_commit_super(sb);
        return 0;
 }
 
@@ -5987,7 +6033,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        }
 
        if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
-               err = ext4_commit_super(sb, 1);
+               err = ext4_commit_super(sb);
                if (err)
                        goto restore_opts;
        }
index 4e3b1f8c2e81eaaf5489ee96f2c63a5d6e8681da..372208500f4e766f0167ce4597f777dafc6891f2 100644 (file)
@@ -792,8 +792,11 @@ static void ext4_xattr_update_super_block(handle_t *handle,
 
        BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
        if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
+               lock_buffer(EXT4_SB(sb)->s_sbh);
                ext4_set_feature_xattr(sb);
-               ext4_handle_dirty_super(handle, sb);
+               ext4_superblock_csum_set(sb);
+               unlock_buffer(EXT4_SB(sb)->s_sbh);
+               ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
        }
 }
 
index c0b60961c67222fe95e60641313192f494dcde48..dab120b71e44d9faeedf56371793067ab92984e6 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -21,7 +21,6 @@
 #include <linux/rcupdate.h>
 #include <linux/close_range.h>
 #include <net/sock.h>
-#include <linux/io_uring.h>
 
 unsigned int sysctl_nr_open __read_mostly = 1024*1024;
 unsigned int sysctl_nr_open_min = BITS_PER_LONG;
@@ -428,7 +427,6 @@ void exit_files(struct task_struct *tsk)
        struct files_struct * files = tsk->files;
 
        if (files) {
-               io_uring_files_cancel(files);
                task_lock(tsk);
                tsk->files = NULL;
                task_unlock(tsk);
index 7e35283fc0b1040d968786cdcaa4a800d9257ddc..985a9e3f976d3bf1e8b6da3aebbb6495fe875dc9 100644 (file)
@@ -262,6 +262,7 @@ struct io_ring_ctx {
                unsigned int            drain_next: 1;
                unsigned int            eventfd_async: 1;
                unsigned int            restricted: 1;
+               unsigned int            sqo_dead: 1;
 
                /*
                 * Ring buffer of indices into array of io_uring_sqe, which is
@@ -353,6 +354,7 @@ struct io_ring_ctx {
                unsigned                cq_entries;
                unsigned                cq_mask;
                atomic_t                cq_timeouts;
+               unsigned                cq_last_tm_flush;
                unsigned long           cq_check_overflow;
                struct wait_queue_head  cq_wait;
                struct fasync_struct    *cq_fasync;
@@ -992,6 +994,13 @@ enum io_mem_account {
        ACCT_PINNED,
 };
 
+static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+                                           struct task_struct *task);
+
+static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node);
+static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
+                       struct io_ring_ctx *ctx);
+
 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
                             struct io_comp_state *cs);
 static void io_cqring_fill_event(struct io_kiocb *req, long res);
@@ -1098,6 +1107,9 @@ static void io_sq_thread_drop_mm_files(void)
 
 static int __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
 {
+       if (current->flags & PF_EXITING)
+               return -EFAULT;
+
        if (!current->files) {
                struct files_struct *files;
                struct nsproxy *nsproxy;
@@ -1125,6 +1137,8 @@ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
 {
        struct mm_struct *mm;
 
+       if (current->flags & PF_EXITING)
+               return -EFAULT;
        if (current->mm)
                return 0;
 
@@ -1338,11 +1352,6 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
 
        /* order cqe stores with ring update */
        smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
-
-       if (wq_has_sleeper(&ctx->cq_wait)) {
-               wake_up_interruptible(&ctx->cq_wait);
-               kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
-       }
 }
 
 static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
@@ -1501,6 +1510,13 @@ static bool io_grab_identity(struct io_kiocb *req)
                spin_unlock_irq(&ctx->inflight_lock);
                req->work.flags |= IO_WQ_WORK_FILES;
        }
+       if (!(req->work.flags & IO_WQ_WORK_MM) &&
+           (def->work_flags & IO_WQ_WORK_MM)) {
+               if (id->mm != current->mm)
+                       return false;
+               mmgrab(id->mm);
+               req->work.flags |= IO_WQ_WORK_MM;
+       }
 
        return true;
 }
@@ -1509,10 +1525,8 @@ static void io_prep_async_work(struct io_kiocb *req)
 {
        const struct io_op_def *def = &io_op_defs[req->opcode];
        struct io_ring_ctx *ctx = req->ctx;
-       struct io_identity *id;
 
        io_req_init_async(req);
-       id = req->work.identity;
 
        if (req->flags & REQ_F_FORCE_ASYNC)
                req->work.flags |= IO_WQ_WORK_CONCURRENT;
@@ -1525,13 +1539,6 @@ static void io_prep_async_work(struct io_kiocb *req)
                        req->work.flags |= IO_WQ_WORK_UNBOUND;
        }
 
-       /* ->mm can never change on us */
-       if (!(req->work.flags & IO_WQ_WORK_MM) &&
-           (def->work_flags & IO_WQ_WORK_MM)) {
-               mmgrab(id->mm);
-               req->work.flags |= IO_WQ_WORK_MM;
-       }
-
        /* if we fail grabbing identity, we must COW, regrab, and retry */
        if (io_grab_identity(req))
                return;
@@ -1633,19 +1640,38 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
 
 static void io_flush_timeouts(struct io_ring_ctx *ctx)
 {
-       while (!list_empty(&ctx->timeout_list)) {
+       u32 seq;
+
+       if (list_empty(&ctx->timeout_list))
+               return;
+
+       seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+
+       do {
+               u32 events_needed, events_got;
                struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
                                                struct io_kiocb, timeout.list);
 
                if (io_is_timeout_noseq(req))
                        break;
-               if (req->timeout.target_seq != ctx->cached_cq_tail
-                                       - atomic_read(&ctx->cq_timeouts))
+
+               /*
+                * Since seq can easily wrap around over time, subtract
+                * the last seq at which timeouts were flushed before comparing.
+                * Assuming not more than 2^31-1 events have happened since,
+                * these subtractions won't have wrapped, so we can check if
+                * target is in [last_seq, current_seq] by comparing the two.
+                */
+               events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
+               events_got = seq - ctx->cq_last_tm_flush;
+               if (events_got < events_needed)
                        break;
 
                list_del_init(&req->timeout.list);
                io_kill_timeout(req);
-       }
+       } while (!list_empty(&ctx->timeout_list));
+
+       ctx->cq_last_tm_flush = seq;
 }
 
 static void io_commit_cqring(struct io_ring_ctx *ctx)
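
The comparison above leans on unsigned subtraction to stay correct across
u32 wraparound. A worked example with hypothetical values straddling the
wrap point:

	u32 last_flush = 0xfffffff0;	/* ctx->cq_last_tm_flush */
	u32 target     = 0x00000005;	/* req->timeout.target_seq, wrapped */
	u32 seq        = 0x00000010;	/* current sequence */

	u32 events_needed = target - last_flush;	/* 0x15 (21) */
	u32 events_got    = seq - last_flush;		/* 0x20 (32) */

	/* events_got >= events_needed, so the timeout fires even though
	 * target numerically precedes last_flush. */
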
@@ -1700,18 +1726,42 @@ static inline unsigned __io_cqring_events(struct io_ring_ctx *ctx)
 
 static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 {
+       /* see waitqueue_active() comment */
+       smp_mb();
+
        if (waitqueue_active(&ctx->wait))
                wake_up(&ctx->wait);
        if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
                wake_up(&ctx->sq_data->wait);
        if (io_should_trigger_evfd(ctx))
                eventfd_signal(ctx->cq_ev_fd, 1);
+       if (waitqueue_active(&ctx->cq_wait)) {
+               wake_up_interruptible(&ctx->cq_wait);
+               kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+       }
+}
+
+static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
+{
+       /* see waitqueue_active() comment */
+       smp_mb();
+
+       if (ctx->flags & IORING_SETUP_SQPOLL) {
+               if (waitqueue_active(&ctx->wait))
+                       wake_up(&ctx->wait);
+       }
+       if (io_should_trigger_evfd(ctx))
+               eventfd_signal(ctx->cq_ev_fd, 1);
+       if (waitqueue_active(&ctx->cq_wait)) {
+               wake_up_interruptible(&ctx->cq_wait);
+               kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+       }
 }
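
The smp_mb() at the top of both helpers pairs with the barrier implied by
prepare_to_wait() on the waiter side; without it the waker could observe an
empty waitqueue while a waiter's enqueue is not yet visible. The general
shape of the race being closed, sketched after the waitqueue_active()
kerneldoc rather than taken from this diff:

	/*
	 * waker (ev_posted)             waiter (io_cqring_wait())
	 *   post CQE                      prepare_to_wait(&ctx->wait, ...)
	 *   smp_mb()                      if (!io_should_wake(&iowq))
	 *   if (waitqueue_active())               schedule()
	 *           wake_up()
	 */
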
 
 /* Returns true if there are no backlogged entries after the flush */
-static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
-                                    struct task_struct *tsk,
-                                    struct files_struct *files)
+static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+                                      struct task_struct *tsk,
+                                      struct files_struct *files)
 {
        struct io_rings *rings = ctx->rings;
        struct io_kiocb *req, *tmp;
@@ -1764,6 +1814,20 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
        return all_flushed;
 }
 
+static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+                                    struct task_struct *tsk,
+                                    struct files_struct *files)
+{
+       if (test_bit(0, &ctx->cq_check_overflow)) {
+               /* iopoll syncs against uring_lock, not completion_lock */
+               if (ctx->flags & IORING_SETUP_IOPOLL)
+                       mutex_lock(&ctx->uring_lock);
+               __io_cqring_overflow_flush(ctx, force, tsk, files);
+               if (ctx->flags & IORING_SETUP_IOPOLL)
+                       mutex_unlock(&ctx->uring_lock);
+       }
+}
+
 static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
 {
        struct io_ring_ctx *ctx = req->ctx;
@@ -2123,14 +2187,14 @@ static void __io_req_task_submit(struct io_kiocb *req)
 {
        struct io_ring_ctx *ctx = req->ctx;
 
-       if (!__io_sq_thread_acquire_mm(ctx) &&
-           !__io_sq_thread_acquire_files(ctx)) {
-               mutex_lock(&ctx->uring_lock);
+       mutex_lock(&ctx->uring_lock);
+       if (!ctx->sqo_dead &&
+           !__io_sq_thread_acquire_mm(ctx) &&
+           !__io_sq_thread_acquire_files(ctx))
                __io_queue_sqe(req, NULL);
-               mutex_unlock(&ctx->uring_lock);
-       } else {
+       else
                __io_req_task_cancel(req, -EFAULT);
-       }
+       mutex_unlock(&ctx->uring_lock);
 }
 
 static void io_req_task_submit(struct callback_head *cb)
@@ -2309,20 +2373,8 @@ static void io_double_put_req(struct io_kiocb *req)
                io_free_req(req);
 }
 
-static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
+static unsigned io_cqring_events(struct io_ring_ctx *ctx)
 {
-       if (test_bit(0, &ctx->cq_check_overflow)) {
-               /*
-                * noflush == true is from the waitqueue handler, just ensure
-                * we wake up the task, and the next invocation will flush the
-                * entries. We cannot safely to it from here.
-                */
-               if (noflush)
-                       return -1U;
-
-               io_cqring_overflow_flush(ctx, false, NULL, NULL);
-       }
-
        /* See comment at the top of this file */
        smp_rmb();
        return __io_cqring_events(ctx);
@@ -2420,8 +2472,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
        }
 
        io_commit_cqring(ctx);
-       if (ctx->flags & IORING_SETUP_SQPOLL)
-               io_cqring_ev_posted(ctx);
+       io_cqring_ev_posted_iopoll(ctx);
        io_req_free_batch_finish(ctx, &rb);
 
        if (!list_empty(&again))
@@ -2547,7 +2598,9 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
                 * If we do, we can potentially be spinning for commands that
                 * already triggered a CQE (eg in error).
                 */
-               if (io_cqring_events(ctx, false))
+               if (test_bit(0, &ctx->cq_check_overflow))
+                       __io_cqring_overflow_flush(ctx, false, NULL, NULL);
+               if (io_cqring_events(ctx))
                        break;
 
                /*
@@ -2664,6 +2717,8 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
        if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
                return false;
 
+       lockdep_assert_held(&req->ctx->uring_lock);
+
        ret = io_sq_thread_acquire_mm_files(req->ctx, req);
 
        if (io_resubmit_prep(req, ret)) {
@@ -5802,6 +5857,12 @@ static int io_timeout(struct io_kiocb *req)
        tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
        req->timeout.target_seq = tail + off;
 
+       /*
+        * Update the last seq here in case io_flush_timeouts() hasn't.
+        * This is safe because ->completion_lock is held, and submissions
+        * and completions are never mixed in the same ->completion_lock
+        * section.
+        */
+       ctx->cq_last_tm_flush = tail;
+
        /*
         * Insertion sort, ensuring the first entry in the list is always
         * the one we need first.
@@ -6822,7 +6883,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 
        /* if we have a backlog and couldn't flush it all, return BUSY */
        if (test_bit(0, &ctx->sq_check_overflow)) {
-               if (!io_cqring_overflow_flush(ctx, false, NULL, NULL))
+               if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL))
                        return -EBUSY;
        }
 
@@ -6924,7 +6985,8 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
                if (!list_empty(&ctx->iopoll_list))
                        io_do_iopoll(ctx, &nr_events, 0);
 
-               if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)))
+               if (to_submit && !ctx->sqo_dead &&
+                   likely(!percpu_ref_is_dying(&ctx->refs)))
                        ret = io_submit_sqes(ctx, to_submit);
                mutex_unlock(&ctx->uring_lock);
        }
@@ -7025,6 +7087,7 @@ static int io_sq_thread(void *data)
 
                if (sqt_spin || !time_after(jiffies, timeout)) {
                        io_run_task_work();
+                       io_sq_thread_drop_mm_files();
                        cond_resched();
                        if (sqt_spin)
                                timeout = jiffies + sqd->sq_thread_idle;
@@ -7062,6 +7125,7 @@ static int io_sq_thread(void *data)
        }
 
        io_run_task_work();
+       io_sq_thread_drop_mm_files();
 
        if (cur_css)
                io_sq_thread_unassociate_blkcg();
@@ -7085,7 +7149,7 @@ struct io_wait_queue {
        unsigned nr_timeouts;
 };
 
-static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
+static inline bool io_should_wake(struct io_wait_queue *iowq)
 {
        struct io_ring_ctx *ctx = iowq->ctx;
 
@@ -7094,7 +7158,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
         * started waiting. For timeouts, we always want to return to userspace,
         * regardless of event count.
         */
-       return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
+       return io_cqring_events(ctx) >= iowq->to_wait ||
                        atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
 }
 
@@ -7104,11 +7168,13 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
        struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
                                                        wq);
 
-       /* use noflush == true, as we can't safely rely on locking context */
-       if (!io_should_wake(iowq, true))
-               return -1;
-
-       return autoremove_wake_function(curr, mode, wake_flags, key);
+       /*
+        * We cannot safely flush overflowed CQEs from here, so ensure we
+        * wake up the task; the next invocation will do it.
+        */
+       if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
+               return autoremove_wake_function(curr, mode, wake_flags, key);
+       return -1;
 }
 
 static int io_run_task_work_sig(void)
@@ -7145,7 +7211,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
        int ret = 0;
 
        do {
-               if (io_cqring_events(ctx, false) >= min_events)
+               io_cqring_overflow_flush(ctx, false, NULL, NULL);
+               if (io_cqring_events(ctx) >= min_events)
                        return 0;
                if (!io_run_task_work())
                        break;
@@ -7173,6 +7240,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
        iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
        trace_io_uring_cqring_wait(ctx, min_events);
        do {
+               io_cqring_overflow_flush(ctx, false, NULL, NULL);
                prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
                                                TASK_INTERRUPTIBLE);
                /* make sure we run task_work before checking for signals */
@@ -7181,8 +7249,10 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                        continue;
                else if (ret < 0)
                        break;
-               if (io_should_wake(&iowq, false))
+               if (io_should_wake(&iowq))
                        break;
+               if (test_bit(0, &ctx->cq_check_overflow))
+                       continue;
                if (uts) {
                        timeout = schedule_timeout(timeout);
                        if (timeout == 0) {
@@ -7231,14 +7301,28 @@ static void io_file_ref_kill(struct percpu_ref *ref)
        complete(&data->done);
 }
 
+static void io_sqe_files_set_node(struct fixed_file_data *file_data,
+                                 struct fixed_file_ref_node *ref_node)
+{
+       spin_lock_bh(&file_data->lock);
+       file_data->node = ref_node;
+       list_add_tail(&ref_node->node, &file_data->ref_list);
+       spin_unlock_bh(&file_data->lock);
+       percpu_ref_get(&file_data->refs);
+}
+
 static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 {
        struct fixed_file_data *data = ctx->file_data;
-       struct fixed_file_ref_node *ref_node = NULL;
+       struct fixed_file_ref_node *backup_node, *ref_node = NULL;
        unsigned nr_tables, i;
+       int ret;
 
        if (!data)
                return -ENXIO;
+       backup_node = alloc_fixed_file_ref_node(ctx);
+       if (!backup_node)
+               return -ENOMEM;
 
        spin_lock_bh(&data->lock);
        ref_node = data->node;
@@ -7250,7 +7334,18 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 
        /* wait for all refs nodes to complete */
        flush_delayed_work(&ctx->file_put_work);
-       wait_for_completion(&data->done);
+       do {
+               ret = wait_for_completion_interruptible(&data->done);
+               if (!ret)
+                       break;
+               ret = io_run_task_work_sig();
+               if (ret < 0) {
+                       percpu_ref_resurrect(&data->refs);
+                       reinit_completion(&data->done);
+                       io_sqe_files_set_node(data, backup_node);
+                       return ret;
+               }
+       } while (1);
 
        __io_sqe_files_unregister(ctx);
        nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
@@ -7261,6 +7356,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
        kfree(data);
        ctx->file_data = NULL;
        ctx->nr_user_files = 0;
+       destroy_fixed_file_ref_node(backup_node);
        return 0;
 }
 
@@ -7654,12 +7750,12 @@ static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
 
        ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
        if (!ref_node)
-               return ERR_PTR(-ENOMEM);
+               return NULL;
 
        if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
                            0, GFP_KERNEL)) {
                kfree(ref_node);
-               return ERR_PTR(-ENOMEM);
+               return NULL;
        }
        INIT_LIST_HEAD(&ref_node->node);
        INIT_LIST_HEAD(&ref_node->file_list);
@@ -7753,16 +7849,12 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
        }
 
        ref_node = alloc_fixed_file_ref_node(ctx);
-       if (IS_ERR(ref_node)) {
+       if (!ref_node) {
                io_sqe_files_unregister(ctx);
-               return PTR_ERR(ref_node);
+               return -ENOMEM;
        }
 
-       file_data->node = ref_node;
-       spin_lock_bh(&file_data->lock);
-       list_add_tail(&ref_node->node, &file_data->ref_list);
-       spin_unlock_bh(&file_data->lock);
-       percpu_ref_get(&file_data->refs);
+       io_sqe_files_set_node(file_data, ref_node);
        return ret;
 out_fput:
        for (i = 0; i < ctx->nr_user_files; i++) {
@@ -7859,8 +7951,8 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                return -EINVAL;
 
        ref_node = alloc_fixed_file_ref_node(ctx);
-       if (IS_ERR(ref_node))
-               return PTR_ERR(ref_node);
+       if (!ref_node)
+               return -ENOMEM;
 
        done = 0;
        fds = u64_to_user_ptr(up->fds);
@@ -7918,11 +8010,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 
        if (needs_switch) {
                percpu_ref_kill(&data->node->refs);
-               spin_lock_bh(&data->lock);
-               list_add_tail(&ref_node->node, &data->ref_list);
-               data->node = ref_node;
-               spin_unlock_bh(&data->lock);
-               percpu_ref_get(&ctx->file_data->refs);
+               io_sqe_files_set_node(data, ref_node);
        } else
                destroy_fixed_file_ref_node(ref_node);
 
@@ -8602,7 +8690,8 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
        smp_rmb();
        if (!io_sqring_full(ctx))
                mask |= EPOLLOUT | EPOLLWRNORM;
-       if (io_cqring_events(ctx, false))
+       io_cqring_overflow_flush(ctx, false, NULL, NULL);
+       if (io_cqring_events(ctx))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        return mask;
@@ -8641,7 +8730,7 @@ static void io_ring_exit_work(struct work_struct *work)
         * as nobody else will be looking for them.
         */
        do {
-               io_iopoll_try_reap_events(ctx);
+               __io_uring_cancel_task_requests(ctx, NULL);
        } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
        io_ring_ctx_free(ctx);
 }
@@ -8657,10 +8746,14 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 {
        mutex_lock(&ctx->uring_lock);
        percpu_ref_kill(&ctx->refs);
+
+       if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
+               ctx->sqo_dead = 1;
+
        /* if force is set, the ring is going away. always drop after that */
        ctx->cq_overflow_flushed = 1;
        if (ctx->rings)
-               io_cqring_overflow_flush(ctx, true, NULL, NULL);
+               __io_cqring_overflow_flush(ctx, true, NULL, NULL);
        mutex_unlock(&ctx->uring_lock);
 
        io_kill_timeouts(ctx, NULL, NULL);
@@ -8796,9 +8889,11 @@ static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
                enum io_wq_cancel cret;
                bool ret = false;
 
-               cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
-               if (cret != IO_WQ_CANCEL_NOTFOUND)
-                       ret = true;
+               if (ctx->io_wq) {
+                       cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
+                                              &cancel, true);
+                       ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
+               }
 
                /* SQPOLL thread does its own polling */
                if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
@@ -8817,6 +8912,19 @@ static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
        }
 }
 
+static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
+{
+       WARN_ON_ONCE(ctx->sqo_task != current);
+
+       mutex_lock(&ctx->uring_lock);
+       ctx->sqo_dead = 1;
+       mutex_unlock(&ctx->uring_lock);
+
+       /* make sure callers enter the ring to get the error */
+       if (ctx->rings)
+               io_ring_set_wakeup_flag(ctx);
+}
+
 /*
  * We need to iteratively cancel requests, in case a request has dependent
  * hard links. These persist even for failure of cancelations, hence keep
@@ -8828,15 +8936,15 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
        struct task_struct *task = current;
 
        if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
+               /* for SQPOLL only sqo_task has task file notes */
+               io_disable_sqo_submit(ctx);
                task = ctx->sq_data->thread;
                atomic_inc(&task->io_uring->in_idle);
                io_sq_thread_park(ctx->sq_data);
        }
 
        io_cancel_defer_files(ctx, task, files);
-       io_ring_submit_lock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
        io_cqring_overflow_flush(ctx, true, task, files);
-       io_ring_submit_unlock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
 
        if (!files)
                __io_uring_cancel_task_requests(ctx, task);
@@ -8909,20 +9017,12 @@ static void io_uring_del_task_file(struct file *file)
                fput(file);
 }
 
-/*
- * Drop task note for this file if we're the only ones that hold it after
- * pending fput()
- */
-static void io_uring_attempt_task_drop(struct file *file)
+static void io_uring_remove_task_files(struct io_uring_task *tctx)
 {
-       if (!current->io_uring)
-               return;
-       /*
-        * fput() is pending, will be 2 if the only other ref is our potential
-        * task file note. If the task is exiting, drop regardless of count.
-        */
-       if (fatal_signal_pending(current) || (current->flags & PF_EXITING) ||
-           atomic_long_read(&file->f_count) == 2)
+       struct file *file;
+       unsigned long index;
+
+       xa_for_each(&tctx->xa, index, file)
                io_uring_del_task_file(file);
 }
 
@@ -8934,16 +9034,12 @@ void __io_uring_files_cancel(struct files_struct *files)
 
        /* make sure overflow events are dropped */
        atomic_inc(&tctx->in_idle);
-
-       xa_for_each(&tctx->xa, index, file) {
-               struct io_ring_ctx *ctx = file->private_data;
-
-               io_uring_cancel_task_requests(ctx, files);
-               if (files)
-                       io_uring_del_task_file(file);
-       }
-
+       xa_for_each(&tctx->xa, index, file)
+               io_uring_cancel_task_requests(file->private_data, files);
        atomic_dec(&tctx->in_idle);
+
+       if (files)
+               io_uring_remove_task_files(tctx);
 }
 
 static s64 tctx_inflight(struct io_uring_task *tctx)
@@ -9005,12 +9101,41 @@ void __io_uring_task_cancel(void)
                finish_wait(&tctx->wait, &wait);
        } while (1);
 
+       finish_wait(&tctx->wait, &wait);
        atomic_dec(&tctx->in_idle);
+
+       io_uring_remove_task_files(tctx);
 }
 
 static int io_uring_flush(struct file *file, void *data)
 {
-       io_uring_attempt_task_drop(file);
+       struct io_uring_task *tctx = current->io_uring;
+       struct io_ring_ctx *ctx = file->private_data;
+
+       if (!tctx)
+               return 0;
+
+       /* we should have cancelled and erased it before PF_EXITING */
+       WARN_ON_ONCE((current->flags & PF_EXITING) &&
+                    xa_load(&tctx->xa, (unsigned long)file));
+
+       /*
+        * fput() is pending, will be 2 if the only other ref is our potential
+        * task file note. If the task is exiting, drop regardless of count.
+        */
+       if (atomic_long_read(&file->f_count) != 2)
+               return 0;
+
+       if (ctx->flags & IORING_SETUP_SQPOLL) {
+               /* there is only one file note, which is owned by sqo_task */
+               WARN_ON_ONCE((ctx->sqo_task == current) ==
+                            !xa_load(&tctx->xa, (unsigned long)file));
+
+               io_disable_sqo_submit(ctx);
+       }
+
+       if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current)
+               io_uring_del_task_file(file);
        return 0;
 }
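
To make the f_count == 2 test concrete, a sketch of who holds what when ->flush() runs for the final close of a registered ring:

    /*
     * 1 ref - the in-progress fput()/close() that triggered ->flush()
     * 1 ref - the task-file note stashed in tctx->xa
     *
     * f_count == 2 therefore means "our note is the only other user";
     * any higher count means another fd/ref is live and the note stays.
     */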
 
@@ -9084,8 +9209,9 @@ static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
 
 #endif /* !CONFIG_MMU */
 
-static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
+static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
 {
+       int ret = 0;
        DEFINE_WAIT(wait);
 
        do {
@@ -9094,6 +9220,11 @@ static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
 
                prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
 
+               if (unlikely(ctx->sqo_dead)) {
+                       ret = -EOWNERDEAD;
+                       goto out;
+               }
+
                if (!io_sqring_full(ctx))
                        break;
 
@@ -9101,6 +9232,8 @@ static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
        } while (!signal_pending(current));
 
        finish_wait(&ctx->sqo_sq_wait, &wait);
+out:
+       return ret;
 }
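
io_sqpoll_wait_sq() follows the standard prepare_to_wait() loop; as a reminder, the generic shape of that idiom (sketch):

    DEFINE_WAIT(wait);

    do {
            prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
            if (condition)                  /* re-check after queuing  */
                    break;                  /* avoids a lost wakeup    */
            schedule();                     /* sleep until wake_up()   */
    } while (!signal_pending(current));
    finish_wait(&wq, &wait);

The new ctx->sqo_dead test slots in after queuing and before the ring-full check, so a dying SQPOLL owner cannot strand a waiter.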
 
 static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
@@ -9172,17 +9305,18 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
         */
        ret = 0;
        if (ctx->flags & IORING_SETUP_SQPOLL) {
-               if (!list_empty_careful(&ctx->cq_overflow_list)) {
-                       bool needs_lock = ctx->flags & IORING_SETUP_IOPOLL;
+               io_cqring_overflow_flush(ctx, false, NULL, NULL);
 
-                       io_ring_submit_lock(ctx, needs_lock);
-                       io_cqring_overflow_flush(ctx, false, NULL, NULL);
-                       io_ring_submit_unlock(ctx, needs_lock);
-               }
+               ret = -EOWNERDEAD;
+               if (unlikely(ctx->sqo_dead))
+                       goto out;
                if (flags & IORING_ENTER_SQ_WAKEUP)
                        wake_up(&ctx->sq_data->wait);
-               if (flags & IORING_ENTER_SQ_WAIT)
-                       io_sqpoll_wait_sq(ctx);
+               if (flags & IORING_ENTER_SQ_WAIT) {
+                       ret = io_sqpoll_wait_sq(ctx);
+                       if (ret)
+                               goto out;
+               }
                submitted = to_submit;
        } else if (to_submit) {
                ret = io_uring_add_task_file(ctx, f.file);
@@ -9601,6 +9735,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
         */
        ret = io_uring_install_fd(ctx, file);
        if (ret < 0) {
+               io_disable_sqo_submit(ctx);
                /* fput will clean it up */
                fput(file);
                return ret;
@@ -9609,6 +9744,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
        trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
        return ret;
 err:
+       io_disable_sqo_submit(ctx);
        io_ring_ctx_wait_and_kill(ctx);
        return ret;
 }
index d2db7dfe232b3ecd299187ed477ad2fd7a0fd1a5..9d33909d0f9e34d8a54c7d0d1d7b44652cc8b69e 100644 (file)
@@ -1713,8 +1713,6 @@ static int can_umount(const struct path *path, int flags)
 {
        struct mount *mnt = real_mount(path->mnt);
 
-       if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
-               return -EINVAL;
        if (!may_mount())
                return -EPERM;
        if (path->dentry != path->mnt->mnt_root)
@@ -1728,6 +1726,7 @@ static int can_umount(const struct path *path, int flags)
        return 0;
 }
 
+// caller is responsible for flags being sane
 int path_umount(struct path *path, int flags)
 {
        struct mount *mnt = real_mount(path->mnt);
@@ -1749,6 +1748,10 @@ static int ksys_umount(char __user *name, int flags)
        struct path path;
        int ret;
 
+       // basic validity checks done first
+       if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
+               return -EINVAL;
+
        if (!(flags & UMOUNT_NOFOLLOW))
                lookup_flags |= LOOKUP_FOLLOW;
        ret = user_path_at(AT_FDCWD, name, lookup_flags, &path);
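
With the validity check hoisted into ksys_umount(), in-kernel users of path_umount() are trusted to pass sane flags. A hypothetical caller (name and path illustrative):

    /* hypothetical in-kernel user; flags are already known-sane here */
    struct path path;
    int err;

    err = kern_path("/mnt/scratch", LOOKUP_FOLLOW, &path);
    if (!err)
            err = path_umount(&path, MNT_DETACH);  /* consumes the path refs */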
index 816e1427f17eb15f8a7b5abda8c554ce56ac9a5a..04bf8066980c1d9fd32b85927dbe51ac025b3512 100644 (file)
@@ -1011,22 +1011,24 @@ nfs_delegation_find_inode_server(struct nfs_server *server,
                                 const struct nfs_fh *fhandle)
 {
        struct nfs_delegation *delegation;
-       struct inode *freeme, *res = NULL;
+       struct super_block *freeme = NULL;
+       struct inode *res = NULL;
 
        list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
                spin_lock(&delegation->lock);
                if (delegation->inode != NULL &&
                    !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&
                    nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
-                       freeme = igrab(delegation->inode);
-                       if (freeme && nfs_sb_active(freeme->i_sb))
-                               res = freeme;
+                       if (nfs_sb_active(server->super)) {
+                               freeme = server->super;
+                               res = igrab(delegation->inode);
+                       }
                        spin_unlock(&delegation->lock);
                        if (res != NULL)
                                return res;
                        if (freeme) {
                                rcu_read_unlock();
-                               iput(freeme);
+                               nfs_sb_deactive(freeme);
                                rcu_read_lock();
                        }
                        return ERR_PTR(-EAGAIN);
index b840d0a91c9d8f2d7f704eb2ab1b1eb311dc1539..62d3189745cdc7461e49109156c170ca0f2c988b 100644 (file)
@@ -136,9 +136,29 @@ struct nfs_fs_context {
        } clone_data;
 };
 
-#define nfs_errorf(fc, fmt, ...) errorf(fc, fmt, ## __VA_ARGS__)
-#define nfs_invalf(fc, fmt, ...) invalf(fc, fmt, ## __VA_ARGS__)
-#define nfs_warnf(fc, fmt, ...) warnf(fc, fmt, ## __VA_ARGS__)
+#define nfs_errorf(fc, fmt, ...) ((fc)->log.log ?              \
+       errorf(fc, fmt, ## __VA_ARGS__) :                       \
+       ({ dprintk(fmt "\n", ## __VA_ARGS__); }))
+
+#define nfs_ferrorf(fc, fac, fmt, ...) ((fc)->log.log ?                \
+       errorf(fc, fmt, ## __VA_ARGS__) :                       \
+       ({ dfprintk(fac, fmt "\n", ## __VA_ARGS__); }))
+
+#define nfs_invalf(fc, fmt, ...) ((fc)->log.log ?              \
+       invalf(fc, fmt, ## __VA_ARGS__) :                       \
+       ({ dprintk(fmt "\n", ## __VA_ARGS__);  -EINVAL; }))
+
+#define nfs_finvalf(fc, fac, fmt, ...) ((fc)->log.log ?                \
+       invalf(fc, fmt, ## __VA_ARGS__) :                       \
+       ({ dfprintk(fac, fmt "\n", ## __VA_ARGS__);  -EINVAL; }))
+
+#define nfs_warnf(fc, fmt, ...) ((fc)->log.log ?               \
+       warnf(fc, fmt, ## __VA_ARGS__) :                        \
+       ({ dprintk(fmt "\n", ## __VA_ARGS__); }))
+
+#define nfs_fwarnf(fc, fac, fmt, ...) ((fc)->log.log ?         \
+       warnf(fc, fmt, ## __VA_ARGS__) :                        \
+       ({ dfprintk(fac, fmt "\n", ## __VA_ARGS__); }))
 
 static inline struct nfs_fs_context *nfs_fc2context(const struct fs_context *fc)
 {
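
The net effect of the macro rework: with a live fs_context log the message still reaches userspace via errorf()/invalf()/warnf(), otherwise it degrades to a dprintk()/dfprintk(). A typical error leg then stays a one-liner (sketch, message text illustrative):

    /* sketch: parse failure, logged to fc or dmesg, evaluates to -EINVAL */
    if (result < 0)
            return nfs_invalf(fc, "NFS: Bad mount option value specified");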
@@ -579,12 +599,14 @@ extern void nfs4_test_session_trunk(struct rpc_clnt *clnt,
 
 static inline struct inode *nfs_igrab_and_active(struct inode *inode)
 {
-       inode = igrab(inode);
-       if (inode != NULL && !nfs_sb_active(inode->i_sb)) {
-               iput(inode);
-               inode = NULL;
+       struct super_block *sb = inode->i_sb;
+
+       if (sb && nfs_sb_active(sb)) {
+               if (igrab(inode))
+                       return inode;
+               nfs_sb_deactive(sb);
        }
-       return inode;
+       return NULL;
 }
 
 static inline void nfs_iput_and_deactive(struct inode *inode)
index 0ce04e0e5d829003f72670500632352b7694577b..2f4679a62712a9dd8406ac73f2961ea5d69c3480 100644 (file)
@@ -3536,10 +3536,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
        trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
 
        /* Handle Layoutreturn errors */
-       if (pnfs_roc_done(task, calldata->inode,
-                               &calldata->arg.lr_args,
-                               &calldata->res.lr_res,
-                               &calldata->res.lr_ret) == -EAGAIN)
+       if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res,
+                         &calldata->res.lr_ret) == -EAGAIN)
                goto out_restart;
 
        /* hmm. we are done with the inode, and in the process of freeing
@@ -6384,10 +6382,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
        trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
 
        /* Handle Layoutreturn errors */
-       if (pnfs_roc_done(task, data->inode,
-                               &data->args.lr_args,
-                               &data->res.lr_res,
-                               &data->res.lr_ret) == -EAGAIN)
+       if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res,
+                         &data->res.lr_ret) == -EAGAIN)
                goto out_restart;
 
        switch (task->tk_status) {
@@ -6441,10 +6437,10 @@ static void nfs4_delegreturn_release(void *calldata)
        struct nfs4_delegreturndata *data = calldata;
        struct inode *inode = data->inode;
 
+       if (data->lr.roc)
+               pnfs_roc_release(&data->lr.arg, &data->lr.res,
+                                data->res.lr_ret);
        if (inode) {
-               if (data->lr.roc)
-                       pnfs_roc_release(&data->lr.arg, &data->lr.res,
-                                       data->res.lr_ret);
                nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
                nfs_iput_and_deactive(inode);
        }
@@ -6520,16 +6516,14 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
        nfs_fattr_init(data->res.fattr);
        data->timestamp = jiffies;
        data->rpc_status = 0;
-       data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, cred);
        data->inode = nfs_igrab_and_active(inode);
-       if (data->inode) {
+       if (data->inode || issync) {
+               data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res,
+                                       cred);
                if (data->lr.roc) {
                        data->args.lr_args = &data->lr.arg;
                        data->res.lr_res = &data->lr.res;
                }
-       } else if (data->lr.roc) {
-               pnfs_roc_release(&data->lr.arg, &data->lr.res, 0);
-               data->lr.roc = false;
        }
 
        task_setup_data.callback_data = data;
@@ -7111,9 +7105,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
                                        data->arg.new_lock_owner, ret);
        } else
                data->cancelled = true;
+       trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
        rpc_put_task(task);
        dprintk("%s: done, ret = %d!\n", __func__, ret);
-       trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
        return ret;
 }
 
index 984cc42ee54d8ccd93e3ba8a0afa9eb85e0c2042..d09bcfd7db89488eec9291c4e985e13953f079d3 100644 (file)
@@ -227,7 +227,7 @@ int nfs4_try_get_tree(struct fs_context *fc)
                           fc, ctx->nfs_server.hostname,
                           ctx->nfs_server.export_path);
        if (err) {
-               nfs_errorf(fc, "NFS4: Couldn't follow remote path");
+               nfs_ferrorf(fc, MOUNT, "NFS4: Couldn't follow remote path");
                dfprintk(MOUNT, "<-- nfs4_try_get_tree() = %d [error]\n", err);
        } else {
                dfprintk(MOUNT, "<-- nfs4_try_get_tree() = 0\n");
@@ -250,7 +250,7 @@ int nfs4_get_referral_tree(struct fs_context *fc)
                            fc, ctx->nfs_server.hostname,
                            ctx->nfs_server.export_path);
        if (err) {
-               nfs_errorf(fc, "NFS4: Couldn't follow remote path");
+               nfs_ferrorf(fc, MOUNT, "NFS4: Couldn't follow remote path");
                dfprintk(MOUNT, "<-- nfs4_get_referral_tree() = %d [error]\n", err);
        } else {
                dfprintk(MOUNT, "<-- nfs4_get_referral_tree() = 0\n");
index 07f59dc8cb2e77e17c8d7cbcbbac94a58c9b9256..4f274f21c4abcb9c2d96bc4088c3f1f4003258f0 100644 (file)
@@ -1152,7 +1152,7 @@ void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
        LIST_HEAD(freeme);
 
        spin_lock(&inode->i_lock);
-       if (!pnfs_layout_is_valid(lo) || !arg_stateid ||
+       if (!pnfs_layout_is_valid(lo) ||
            !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
                goto out_unlock;
        if (stateid) {
@@ -1509,10 +1509,8 @@ out_noroc:
        return false;
 }
 
-int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
-               struct nfs4_layoutreturn_args **argpp,
-               struct nfs4_layoutreturn_res **respp,
-               int *ret)
+int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
+                 struct nfs4_layoutreturn_res **respp, int *ret)
 {
        struct nfs4_layoutreturn_args *arg = *argpp;
        int retval = -EAGAIN;
@@ -1545,7 +1543,7 @@ int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
                return 0;
        case -NFS4ERR_OLD_STATEID:
                if (!nfs4_layout_refresh_old_stateid(&arg->stateid,
-                                       &arg->range, inode))
+                                                    &arg->range, arg->inode))
                        break;
                *ret = -NFS4ERR_NOMATCHING_LAYOUT;
                return -EAGAIN;
@@ -1560,23 +1558,28 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
                int ret)
 {
        struct pnfs_layout_hdr *lo = args->layout;
-       const nfs4_stateid *arg_stateid = NULL;
+       struct inode *inode = args->inode;
        const nfs4_stateid *res_stateid = NULL;
        struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
 
        switch (ret) {
        case -NFS4ERR_NOMATCHING_LAYOUT:
+               spin_lock(&inode->i_lock);
+               if (pnfs_layout_is_valid(lo) &&
+                   nfs4_stateid_match_other(&args->stateid, &lo->plh_stateid))
+                       pnfs_set_plh_return_info(lo, args->range.iomode, 0);
+               pnfs_clear_layoutreturn_waitbit(lo);
+               spin_unlock(&inode->i_lock);
                break;
        case 0:
                if (res->lrs_present)
                        res_stateid = &res->stateid;
                fallthrough;
        default:
-               arg_stateid = &args->stateid;
+               pnfs_layoutreturn_free_lsegs(lo, &args->stateid, &args->range,
+                                            res_stateid);
        }
        trace_nfs4_layoutreturn_on_close(args->inode, &args->stateid, ret);
-       pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
-                       res_stateid);
        if (ld_private && ld_private->ops && ld_private->ops->free)
                ld_private->ops->free(ld_private);
        pnfs_put_layout_hdr(lo);
@@ -2015,6 +2018,27 @@ lookup_again:
                goto lookup_again;
        }
 
+       /*
+        * Because we free lsegs when sending LAYOUTRETURN, we need to wait
+        * for LAYOUTRETURN.
+        */
+       if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
+               spin_unlock(&ino->i_lock);
+               dprintk("%s wait for layoutreturn\n", __func__);
+               lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
+               if (!IS_ERR(lseg)) {
+                       pnfs_put_layout_hdr(lo);
+                       dprintk("%s retrying\n", __func__);
+                       trace_pnfs_update_layout(ino, pos, count, iomode, lo,
+                                                lseg,
+                                                PNFS_UPDATE_LAYOUT_RETRY);
+                       goto lookup_again;
+               }
+               trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
+                                        PNFS_UPDATE_LAYOUT_RETURN);
+               goto out_put_layout_hdr;
+       }
+
        lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
        if (lseg) {
                trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
@@ -2067,28 +2091,6 @@ lookup_again:
                nfs4_stateid_copy(&stateid, &lo->plh_stateid);
        }
 
-       /*
-        * Because we free lsegs before sending LAYOUTRETURN, we need to wait
-        * for LAYOUTRETURN even if first is true.
-        */
-       if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
-               spin_unlock(&ino->i_lock);
-               dprintk("%s wait for layoutreturn\n", __func__);
-               lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
-               if (!IS_ERR(lseg)) {
-                       if (first)
-                               pnfs_clear_first_layoutget(lo);
-                       pnfs_put_layout_hdr(lo);
-                       dprintk("%s retrying\n", __func__);
-                       trace_pnfs_update_layout(ino, pos, count, iomode, lo,
-                                       lseg, PNFS_UPDATE_LAYOUT_RETRY);
-                       goto lookup_again;
-               }
-               trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
-                               PNFS_UPDATE_LAYOUT_RETURN);
-               goto out_put_layout_hdr;
-       }
-
        if (pnfs_layoutgets_blocked(lo)) {
                trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
                                PNFS_UPDATE_LAYOUT_BLOCKED);
@@ -2242,6 +2244,7 @@ static void _lgopen_prepare_attached(struct nfs4_opendata *data,
                                             &rng, GFP_KERNEL);
        if (!lgp) {
                pnfs_clear_first_layoutget(lo);
+               nfs_layoutget_end(lo);
                pnfs_put_layout_hdr(lo);
                return;
        }
index bbd3de1025f23cb3bc5e78a46abef5e1ae56eba5..d810ae674f4e8aba15c34d2bf730a8c95d0aa5db 100644 (file)
@@ -297,10 +297,8 @@ bool pnfs_roc(struct inode *ino,
                struct nfs4_layoutreturn_args *args,
                struct nfs4_layoutreturn_res *res,
                const struct cred *cred);
-int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
-               struct nfs4_layoutreturn_args **argpp,
-               struct nfs4_layoutreturn_res **respp,
-               int *ret);
+int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
+                 struct nfs4_layoutreturn_res **respp, int *ret);
 void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
                struct nfs4_layoutreturn_res *res,
                int ret);
@@ -772,7 +770,7 @@ pnfs_roc(struct inode *ino,
 }
 
 static inline int
-pnfs_roc_done(struct rpc_task *task, struct inode *inode,
+pnfs_roc_done(struct rpc_task *task,
                struct nfs4_layoutreturn_args **argpp,
                struct nfs4_layoutreturn_res **respp,
                int *ret)
index 2efcfdd348a111a86c9647250a9d51b6d21f2dc0..49d3389bd81300f1a14ca91e2b5e80195e776aaf 100644 (file)
@@ -78,22 +78,18 @@ void
 pnfs_generic_clear_request_commit(struct nfs_page *req,
                                  struct nfs_commit_info *cinfo)
 {
-       struct pnfs_layout_segment *freeme = NULL;
+       struct pnfs_commit_bucket *bucket = NULL;
 
        if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
                goto out;
        cinfo->ds->nwritten--;
-       if (list_is_singular(&req->wb_list)) {
-               struct pnfs_commit_bucket *bucket;
-
+       if (list_is_singular(&req->wb_list))
                bucket = list_first_entry(&req->wb_list,
-                                         struct pnfs_commit_bucket,
-                                         written);
-               freeme = pnfs_free_bucket_lseg(bucket);
-       }
+                                         struct pnfs_commit_bucket, written);
 out:
        nfs_request_remove_commit_list(req, cinfo);
-       pnfs_put_lseg(freeme);
+       if (bucket)
+               pnfs_put_lseg(pnfs_free_bucket_lseg(bucket));
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit);
 
@@ -407,12 +403,16 @@ pnfs_bucket_get_committing(struct list_head *head,
                           struct pnfs_commit_bucket *bucket,
                           struct nfs_commit_info *cinfo)
 {
+       struct pnfs_layout_segment *lseg;
        struct list_head *pos;
 
        list_for_each(pos, &bucket->committing)
                cinfo->ds->ncommitting--;
        list_splice_init(&bucket->committing, head);
-       return pnfs_free_bucket_lseg(bucket);
+       lseg = pnfs_free_bucket_lseg(bucket);
+       if (!lseg)
+               lseg = pnfs_get_lseg(bucket->lseg);
+       return lseg;
 }
 
 static struct nfs_commit_data *
@@ -424,8 +424,6 @@ pnfs_bucket_fetch_commitdata(struct pnfs_commit_bucket *bucket,
        if (!data)
                return NULL;
        data->lseg = pnfs_bucket_get_committing(&data->pages, bucket, cinfo);
-       if (!data->lseg)
-               data->lseg = pnfs_get_lseg(bucket->lseg);
        return data;
 }
 
index 821db21ba072cd7dbf7a70c69bd8f0ee378758f6..34b880211e5eab8e63fd3d034a9a3b6aa8b4fa40 100644 (file)
@@ -865,9 +865,14 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
        if (isdotent(name, namlen)) {
                if (namlen == 2) {
                        dchild = dget_parent(dparent);
-                       /* filesystem root - cannot return filehandle for ".." */
+                       /*
+                        * Don't return filehandle for ".." if we're at
+                        * the filesystem or export root:
+                        */
                        if (dchild == dparent)
                                goto out;
+                       if (dparent == exp->ex_path.dentry)
+                               goto out;
                } else
                        dchild = dget(dparent);
        } else
index 4727b7f03c5bb24a260465b70788db5e2e1baee6..8d6d2678abade46ac06ba69a8a84700335c424de 100644 (file)
 #include "pnfs.h"
 #include "trace.h"
 
+static bool inter_copy_offload_enable;
+module_param(inter_copy_offload_enable, bool, 0644);
+MODULE_PARM_DESC(inter_copy_offload_enable,
+                "Enable inter server to server copy offload. Default: false");
+
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
 #include <linux/security.h>
 
index 45ee6b12ce5b70d327d08bb9a10745d659383a90..eaaa1605b5b5f38a51065a8bcc21ebc76d5a0190 100644 (file)
@@ -147,6 +147,25 @@ svcxdr_dupstr(struct nfsd4_compoundargs *argp, void *buf, u32 len)
        return p;
 }
 
+static void *
+svcxdr_savemem(struct nfsd4_compoundargs *argp, __be32 *p, u32 len)
+{
+       __be32 *tmp;
+
+       /*
+        * The location of the decoded data item is stable,
+        * so @p is OK to use. This is the common case.
+        */
+       if (p != argp->xdr->scratch.iov_base)
+               return p;
+
+       tmp = svcxdr_tmpalloc(argp, len);
+       if (!tmp)
+               return NULL;
+       memcpy(tmp, p, len);
+       return tmp;
+}
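
Background for the helper: xdr_inline_decode() may copy an item that straddles a page boundary into the stream's single scratch buffer, so the returned pointer is only valid until the next decode. A sketch of the hazard svcxdr_savemem() closes:

    p = xdr_inline_decode(argp->xdr, len1);  /* may point into xdr->scratch */
    q = xdr_inline_decode(argp->xdr, len2);  /* may reuse that same scratch */
    /* if both items hit the scratch path, p's bytes are stale by now */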
+
 /*
  * NFSv4 basic data type decoders
  */
@@ -183,11 +202,10 @@ nfsd4_decode_opaque(struct nfsd4_compoundargs *argp, struct xdr_netobj *o)
        p = xdr_inline_decode(argp->xdr, len);
        if (!p)
                return nfserr_bad_xdr;
-       o->data = svcxdr_tmpalloc(argp, len);
+       o->data = svcxdr_savemem(argp, p, len);
        if (!o->data)
                return nfserr_jukebox;
        o->len = len;
-       memcpy(o->data, p, len);
 
        return nfs_ok;
 }
@@ -205,10 +223,9 @@ nfsd4_decode_component4(struct nfsd4_compoundargs *argp, char **namp, u32 *lenp)
        status = check_filename((char *)p, *lenp);
        if (status)
                return status;
-       *namp = svcxdr_tmpalloc(argp, *lenp);
+       *namp = svcxdr_savemem(argp, p, *lenp);
        if (!*namp)
                return nfserr_jukebox;
-       memcpy(*namp, p, *lenp);
 
        return nfs_ok;
 }
@@ -1200,10 +1217,9 @@ nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, struct nfsd4_putfh *putfh)
        p = xdr_inline_decode(argp->xdr, putfh->pf_fhlen);
        if (!p)
                return nfserr_bad_xdr;
-       putfh->pf_fhval = svcxdr_tmpalloc(argp, putfh->pf_fhlen);
+       putfh->pf_fhval = svcxdr_savemem(argp, p, putfh->pf_fhlen);
        if (!putfh->pf_fhval)
                return nfserr_jukebox;
-       memcpy(putfh->pf_fhval, p, putfh->pf_fhlen);
 
        return nfs_ok;
 }
@@ -1318,24 +1334,20 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
        p = xdr_inline_decode(argp->xdr, setclientid->se_callback_netid_len);
        if (!p)
                return nfserr_bad_xdr;
-       setclientid->se_callback_netid_val = svcxdr_tmpalloc(argp,
+       setclientid->se_callback_netid_val = svcxdr_savemem(argp, p,
                                                setclientid->se_callback_netid_len);
        if (!setclientid->se_callback_netid_val)
                return nfserr_jukebox;
-       memcpy(setclientid->se_callback_netid_val, p,
-              setclientid->se_callback_netid_len);
 
        if (xdr_stream_decode_u32(argp->xdr, &setclientid->se_callback_addr_len) < 0)
                return nfserr_bad_xdr;
        p = xdr_inline_decode(argp->xdr, setclientid->se_callback_addr_len);
        if (!p)
                return nfserr_bad_xdr;
-       setclientid->se_callback_addr_val = svcxdr_tmpalloc(argp,
+       setclientid->se_callback_addr_val = svcxdr_savemem(argp, p,
                                                setclientid->se_callback_addr_len);
        if (!setclientid->se_callback_addr_val)
                return nfserr_jukebox;
-       memcpy(setclientid->se_callback_addr_val, p,
-              setclientid->se_callback_addr_len);
        if (xdr_stream_decode_u32(argp->xdr, &setclientid->se_callback_ident) < 0)
                return nfserr_bad_xdr;
 
@@ -1375,10 +1387,9 @@ nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify
        p = xdr_inline_decode(argp->xdr, verify->ve_attrlen);
        if (!p)
                return nfserr_bad_xdr;
-       verify->ve_attrval = svcxdr_tmpalloc(argp, verify->ve_attrlen);
+       verify->ve_attrval = svcxdr_savemem(argp, p, verify->ve_attrlen);
        if (!verify->ve_attrval)
                return nfserr_jukebox;
-       memcpy(verify->ve_attrval, p, verify->ve_attrlen);
 
        return nfs_ok;
 }
@@ -2333,10 +2344,9 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
                p = xdr_inline_decode(argp->xdr, argp->taglen);
                if (!p)
                        return 0;
-               argp->tag = svcxdr_tmpalloc(argp, argp->taglen);
+               argp->tag = svcxdr_savemem(argp, p, argp->taglen);
                if (!argp->tag)
                        return 0;
-               memcpy(argp->tag, p, argp->taglen);
                max_reply += xdr_align_size(argp->taglen);
        }
 
@@ -4756,6 +4766,7 @@ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
                            resp->rqstp->rq_vec, read->rd_vlen, maxcount, eof);
        if (nfserr)
                return nfserr;
+       xdr_truncate_encode(xdr, starting_len + 16 + xdr_align_size(*maxcount));
 
        tmp = htonl(NFS4_CONTENT_DATA);
        write_bytes_to_xdr_buf(xdr->buf, starting_len,      &tmp,   4);
@@ -4763,6 +4774,10 @@ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
        write_bytes_to_xdr_buf(xdr->buf, starting_len + 4,  &tmp64, 8);
        tmp = htonl(*maxcount);
        write_bytes_to_xdr_buf(xdr->buf, starting_len + 12, &tmp,   4);
+
+       tmp = xdr_zero;
+       write_bytes_to_xdr_buf(xdr->buf, starting_len + 16 + *maxcount, &tmp,
+                              xdr_pad_size(*maxcount));
        return nfs_ok;
 }
 
@@ -4855,14 +4870,15 @@ out:
        if (nfserr && segments == 0)
                xdr_truncate_encode(xdr, starting_len);
        else {
-               tmp = htonl(eof);
-               write_bytes_to_xdr_buf(xdr->buf, starting_len,     &tmp, 4);
-               tmp = htonl(segments);
-               write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
                if (nfserr) {
                        xdr_truncate_encode(xdr, last_segment);
                        nfserr = nfs_ok;
+                       eof = 0;
                }
+               tmp = htonl(eof);
+               write_bytes_to_xdr_buf(xdr->buf, starting_len,     &tmp, 4);
+               tmp = htonl(segments);
+               write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
        }
 
        return nfserr;
index 00384c332f9bb657a41654c99628d59cddb3b3df..f9c9f4c63cc77db4b67700e66cf521caa11a7017 100644 (file)
 
 #define NFSDDBG_FACILITY       NFSDDBG_SVC
 
-bool inter_copy_offload_enable;
-EXPORT_SYMBOL_GPL(inter_copy_offload_enable);
-module_param(inter_copy_offload_enable, bool, 0644);
-MODULE_PARM_DESC(inter_copy_offload_enable,
-                "Enable inter server to server copy offload. Default: false");
-
 extern struct svc_program      nfsd_program;
 static int                     nfsd(void *vrqstp);
 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
index a60ff5ce1a375732df4552d3bb1d52b49f9c15bf..c300885ae75ddee7866e519c0d71cebadfa5d6da 100644 (file)
@@ -568,7 +568,6 @@ struct nfsd4_copy {
        struct nfs_fh           c_fh;
        nfs4_stateid            stateid;
 };
-extern bool inter_copy_offload_enable;
 
 struct nfsd4_seek {
        /* request */
index 3e01d8f2ab9061faa43f865e1f9f3dc2868e5dbc..dcab112e1f0012073456c56b52b1ce4ac261d9a4 100644 (file)
@@ -1285,26 +1285,23 @@ fput_and_out:
        return ret;
 }
 
+#ifndef CONFIG_ARCH_SPLIT_ARG64
 SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
                              __u64, mask, int, dfd,
                              const char  __user *, pathname)
 {
        return do_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
 }
+#endif
 
-#ifdef CONFIG_COMPAT
-COMPAT_SYSCALL_DEFINE6(fanotify_mark,
+#if defined(CONFIG_ARCH_SPLIT_ARG64) || defined(CONFIG_COMPAT)
+SYSCALL32_DEFINE6(fanotify_mark,
                                int, fanotify_fd, unsigned int, flags,
-                               __u32, mask0, __u32, mask1, int, dfd,
+                               SC_ARG64(mask), int, dfd,
                                const char  __user *, pathname)
 {
-       return do_fanotify_mark(fanotify_fd, flags,
-#ifdef __BIG_ENDIAN
-                               ((__u64)mask0 << 32) | mask1,
-#else
-                               ((__u64)mask1 << 32) | mask0,
-#endif
-                                dfd, pathname);
+       return do_fanotify_mark(fanotify_fd, flags, SC_VAL64(__u64, mask),
+                               dfd, pathname);
 }
 #endif
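
SYSCALL32_DEFINE6, SC_ARG64 and SC_VAL64 come from the syscall-wrapper support this fix builds on; presumably they expand along these lines (sketch, not the verbatim definitions):

    /* illustrative: split a 64-bit argument across two 32-bit slots */
    #ifdef __LITTLE_ENDIAN
    #define SC_ARG64(name)  u32, name##_lo, u32, name##_hi
    #else
    #define SC_ARG64(name)  u32, name##_hi, u32, name##_lo
    #endif
    #define SC_VAL64(type, name)  ((type)name##_hi << 32 | name##_lo)

which is the same __BIG_ENDIAN/__LITTLE_ENDIAN reassembly the removed open-coded version performed by hand.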
 
index ee5a235b30562721d9800dbfafc3b958d51d8135..602e3a52884d883c639420e426b539fd74ef35c5 100644 (file)
@@ -1035,6 +1035,25 @@ struct clear_refs_private {
 };
 
 #ifdef CONFIG_MEM_SOFT_DIRTY
+
+#define is_cow_mapping(flags) (((flags) & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE)
+
+static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+       struct page *page;
+
+       if (!pte_write(pte))
+               return false;
+       if (!is_cow_mapping(vma->vm_flags))
+               return false;
+       if (likely(!atomic_read(&vma->vm_mm->has_pinned)))
+               return false;
+       page = vm_normal_page(vma, addr, pte);
+       if (!page)
+               return false;
+       return page_maybe_dma_pinned(page);
+}
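
Why the pin check matters: clearing soft-dirty write-protects the PTE, and the next write then COWs the page, silently diverging user memory from a device still doing DMA into the old page. In sketch form:

    /*
     * 1. pin_user_pages(FOLL_WRITE) pins the page for DMA
     * 2. clear_refs write-protects the PTE to re-arm soft-dirty
     * 3. the next CPU write faults and COWs to a new page
     * 4. the process writes the copy; the device keeps DMA-ing
     *    into the old, still-pinned page
     *
     * Skipping maybe-pinned pages leaves the PTE writable, so no COW.
     */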
+
 static inline void clear_soft_dirty(struct vm_area_struct *vma,
                unsigned long addr, pte_t *pte)
 {
@@ -1049,6 +1068,8 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
        if (pte_present(ptent)) {
                pte_t old_pte;
 
+               if (pte_is_pinned(vma, addr, ptent))
+                       return;
                old_pte = ptep_modify_prot_start(vma, addr, pte);
                ptent = pte_wrprotect(old_pte);
                ptent = pte_clear_soft_dirty(ptent);
@@ -1215,41 +1236,26 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                        .type = type,
                };
 
+               if (mmap_write_lock_killable(mm)) {
+                       count = -EINTR;
+                       goto out_mm;
+               }
                if (type == CLEAR_REFS_MM_HIWATER_RSS) {
-                       if (mmap_write_lock_killable(mm)) {
-                               count = -EINTR;
-                               goto out_mm;
-                       }
-
                        /*
                         * Writing 5 to /proc/pid/clear_refs resets the peak
                         * resident set size to this mm's current rss value.
                         */
                        reset_mm_hiwater_rss(mm);
-                       mmap_write_unlock(mm);
-                       goto out_mm;
+                       goto out_unlock;
                }
 
-               if (mmap_read_lock_killable(mm)) {
-                       count = -EINTR;
-                       goto out_mm;
-               }
                tlb_gather_mmu(&tlb, mm, 0, -1);
                if (type == CLEAR_REFS_SOFT_DIRTY) {
                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                if (!(vma->vm_flags & VM_SOFTDIRTY))
                                        continue;
-                               mmap_read_unlock(mm);
-                               if (mmap_write_lock_killable(mm)) {
-                                       count = -EINTR;
-                                       goto out_mm;
-                               }
-                               for (vma = mm->mmap; vma; vma = vma->vm_next) {
-                                       vma->vm_flags &= ~VM_SOFTDIRTY;
-                                       vma_set_page_prot(vma);
-                               }
-                               mmap_write_downgrade(mm);
-                               break;
+                               vma->vm_flags &= ~VM_SOFTDIRTY;
+                               vma_set_page_prot(vma);
                        }
 
                        mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
@@ -1261,7 +1267,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                if (type == CLEAR_REFS_SOFT_DIRTY)
                        mmu_notifier_invalidate_range_end(&range);
                tlb_finish_mmu(&tlb, 0, -1);
-               mmap_read_unlock(mm);
+out_unlock:
+               mmap_write_unlock(mm);
 out_mm:
                mmput(mm);
        }
index ebfebdfe5c69a168840aad4bc5e8ccdd4d7e6dbe..37aaa8317f3ae1a6a9aa9153f336eefa6c2b1aa4 100644 (file)
@@ -1011,14 +1011,17 @@ static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
        fdcount = do_poll(head, &table, end_time);
        poll_freewait(&table);
 
+       if (!user_write_access_begin(ufds, nfds * sizeof(*ufds)))
+               goto out_fds;
+
        for (walk = head; walk; walk = walk->next) {
                struct pollfd *fds = walk->entries;
                int j;
 
-               for (j = 0; j < walk->len; j++, ufds++)
-                       if (__put_user(fds[j].revents, &ufds->revents))
-                               goto out_fds;
+               for (j = walk->len; j; fds++, ufds++, j--)
+                       unsafe_put_user(fds->revents, &ufds->revents, Efault);
        }
+       user_write_access_end();
 
        err = fdcount;
 out_fds:
@@ -1030,6 +1033,11 @@ out_fds:
        }
 
        return err;
+
+Efault:
+       user_write_access_end();
+       err = -EFAULT;
+       goto out_fds;
 }
 
 static long do_restart_poll(struct restart_block *restart_block)
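
For reference, the bulk user-access pattern the poll rewrite adopts: one access check up front, non-faulting stores in the loop, and one unwind label (generic sketch):

    if (!user_write_access_begin(uptr, n * sizeof(*uptr)))
            return -EFAULT;
    for (i = 0; i < n; i++, uptr++)
            unsafe_put_user(vals[i], uptr, Efault);  /* jumps on fault */
    user_write_access_end();
    return 0;

    Efault:
    user_write_access_end();
    return -EFAULT;

This replaces a __put_user() per pollfd, each with its own access check, with a single begin/end pair around the whole copy-out.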
index ef2697b78820d4634f47a258313e7ddca43daa11..827278f937fe7f489c7fa9c553d730a6a6217718 100644 (file)
@@ -3,6 +3,7 @@ config ZONEFS_FS
        depends on BLOCK
        depends on BLK_DEV_ZONED
        select FS_IOMAP
+       select CRC32
        help
          zonefs is a simple file system which exposes zones of a zoned block
          device (e.g. host-managed or host-aware SMR disk drives) as files.
index 4365b9aa3e3f631086d9b31af9c3e78b9947c6dc..267f6dfb8960b0c2131c6f91ee1bc482f8fb3993 100644 (file)
@@ -34,6 +34,7 @@ mandatory-y += kmap_size.h
 mandatory-y += kprobes.h
 mandatory-y += linkage.h
 mandatory-y += local.h
+mandatory-y += local64.h
 mandatory-y += mm-arch-hooks.h
 mandatory-y += mmiowb.h
 mandatory-y += mmu.h
index dd90c9792909d1db39c2a8a52cc78216a034f1ba..0e7316a86240b8c309d6b49ba0d8c1663d2159e5 100644 (file)
  * See Documentation/atomic_bitops.txt for details.
  */
 
-static inline void set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
        atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
        atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static inline void change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
        atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
index fc85f50fa0e96e6fd6111c7e027a81ebe6004829..8dcb3e1477bc98faa7d62171ba5ec8166a41bc69 100644 (file)
@@ -13,7 +13,7 @@
 #define ARMV8_PMU_CYCLE_IDX            (ARMV8_PMU_MAX_COUNTERS - 1)
 #define ARMV8_PMU_MAX_COUNTER_PAIRS    ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
 
-#ifdef CONFIG_KVM_ARM_PMU
+#ifdef CONFIG_HW_PERF_EVENTS
 
 struct kvm_pmc {
        u8 idx; /* index into the pmu->pmc array */
index 2630c2e953f73f9b21a100238dfb519cef3fc7ba..053bf05fb1f7606e23fb7949c8f0132a82a9c10e 100644 (file)
@@ -885,6 +885,13 @@ static inline int acpi_device_modalias(struct device *dev,
        return -ENODEV;
 }
 
+static inline struct platform_device *
+acpi_create_platform_device(struct acpi_device *adev,
+                           struct property_entry *properties)
+{
+       return NULL;
+}
+
 static inline bool acpi_dma_supported(struct acpi_device *adev)
 {
        return false;
index 47b021952ac753d92ad2137080ce771b2bbe46e6..d705b174d346ac54d344b732a6e6c1c327768c36 100644 (file)
@@ -447,8 +447,8 @@ enum {
        BLK_MQ_REQ_NOWAIT       = (__force blk_mq_req_flags_t)(1 << 0),
        /* allocate from reserved pool */
        BLK_MQ_REQ_RESERVED     = (__force blk_mq_req_flags_t)(1 << 1),
-       /* set RQF_PREEMPT */
-       BLK_MQ_REQ_PREEMPT      = (__force blk_mq_req_flags_t)(1 << 3),
+       /* set RQF_PM */
+       BLK_MQ_REQ_PM           = (__force blk_mq_req_flags_t)(1 << 2),
 };
 
 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
index 070de09425ada7233f8899eecab0c404b30fb40b..f94ee3089e015ebe2424d899e70a3f824889c299 100644 (file)
@@ -79,9 +79,6 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_MQ_INFLIGHT                ((__force req_flags_t)(1 << 6))
 /* don't call prep for this one */
 #define RQF_DONTPREP           ((__force req_flags_t)(1 << 7))
-/* set for "ide_preempt" requests and also for requests for which the SCSI
-   "quiesce" state must be ignored. */
-#define RQF_PREEMPT            ((__force req_flags_t)(1 << 8))
 /* vaguely specified driver internal error.  Ignored by the block layer */
 #define RQF_FAILED             ((__force req_flags_t)(1 << 10))
 /* don't warn about errors */
@@ -430,8 +427,7 @@ struct request_queue {
        unsigned long           queue_flags;
        /*
         * Number of contexts that have called blk_set_pm_only(). If this
-        * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
-        * processed.
+        * counter is above zero then only RQF_PM requests are processed.
         */
        atomic_t                pm_only;
 
@@ -696,6 +692,18 @@ static inline bool queue_is_mq(struct request_queue *q)
        return q->mq_ops;
 }
 
+#ifdef CONFIG_PM
+static inline enum rpm_status queue_rpm_status(struct request_queue *q)
+{
+       return q->rpm_status;
+}
+#else
+static inline enum rpm_status queue_rpm_status(struct request_queue *q)
+{
+       return RPM_ACTIVE;
+}
+#endif
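
Callers can then branch on the runtime-PM state without any #ifdef CONFIG_PM of their own; hypothetically:

    /* hypothetical consumer: refuse new work while the queue sleeps */
    if (queue_rpm_status(q) == RPM_SUSPENDED)
            return -EAGAIN;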
+
 static inline enum blk_zoned_model
 blk_queue_zoned_model(struct request_queue *q)
 {
index 7bb66e15b481b27cd51e0c522f80d6206233f9bb..e3a0be2c90ad9846167f65740be7c6f97763e3c3 100644 (file)
@@ -77,9 +77,4 @@
 #define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
 #define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
 
-#ifdef __GENKSYMS__
-/* genksyms gets confused by _Static_assert */
-#define _Static_assert(expr, ...)
-#endif
-
 #endif /* _LINUX_BUILD_BUG_H */
index f5e02f6c06555966119b245ce4615adc0e2f892d..3989dcb94d3d1cfa733ace3685f5123b96d42d71 100644 (file)
@@ -33,8 +33,8 @@
 #define CEPH_MSGR2_INCARNATION_1 (0ull)
 
 #define DEFINE_MSGR2_FEATURE(bit, incarnation, name)               \
-       static const uint64_t CEPH_MSGR2_FEATURE_##name = (1ULL << bit); \
-       static const uint64_t CEPH_MSGR2_FEATUREMASK_##name =            \
+       static const uint64_t __maybe_unused CEPH_MSGR2_FEATURE_##name = (1ULL << bit); \
+       static const uint64_t __maybe_unused CEPH_MSGR2_FEATUREMASK_##name =            \
                        (1ULL << bit | CEPH_MSGR2_INCARNATION_##incarnation);
 
 #define HAVE_MSGR2_FEATURE(x, name) \
index 74c6c0486eed782e267b1addd65669f1422bad65..555ab0fddbef7dc937a178e49621598736408c05 100644 (file)
 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */
 #if GCC_VERSION < 40900
 # error Sorry, your version of GCC is too old - please use 4.9 or newer.
+#elif defined(CONFIG_ARM64) && GCC_VERSION < 50100
+/*
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293
+ * https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk
+ */
+# error Sorry, your version of GCC is too old - please use 5.1 or newer.
 #endif
 
 /*
index b2a3f4f641a70745d94e6b84acbc2261d8e7a03d..ea5e04e75845c8e24612221dd77d5db062e36e2f 100644 (file)
  */
 #define __used                          __attribute__((__used__))
 
+/*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warn_005funused_005fresult-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#nodiscard-warn-unused-result
+ */
+#define __must_check                    __attribute__((__warn_unused_result__))
+
 /*
  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute
  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute
index bbaa39e98f9fa916111490aee6ebea7420bbb3ef..e5dd5a4ae94608b9b5d0950a81ba272f0dde8997 100644 (file)
@@ -121,12 +121,6 @@ struct ftrace_likely_data {
        unsigned long                   constant;
 };
 
-#ifdef CONFIG_ENABLE_MUST_CHECK
-#define __must_check           __attribute__((__warn_unused_result__))
-#else
-#define __must_check
-#endif
-
 #if defined(CC_USING_HOTPATCH)
 #define notrace                        __attribute__((hotpatch(0, 0)))
 #elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY)
index dbe78e8e26029ef908d027deee29fec5c0e57638..20874db50bc8a5ad054bdc607e44d842fe5f7274 100644 (file)
@@ -186,12 +186,9 @@ extern int braille_register_console(struct console *, int index,
 extern int braille_unregister_console(struct console *);
 #ifdef CONFIG_TTY
 extern void console_sysfs_notify(void);
-extern void register_ttynull_console(void);
 #else
 static inline void console_sysfs_notify(void)
 { }
-static inline void register_ttynull_console(void)
-{ }
 #endif
 extern bool console_suspend_enabled;
 
index 29d255fdd5d641cf31eefefcc3d47744e87734d3..90bd558a17f516780329589ea6a07d62b9909c5a 100644 (file)
@@ -150,6 +150,7 @@ void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
 
 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
+struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c);
 sector_t dm_bufio_get_block_number(struct dm_buffer *b);
 void *dm_bufio_get_block_data(struct dm_buffer *b);
 void *dm_bufio_get_aux_data(struct dm_buffer *b);
index d956987ed032db5b9ce9e9d1df440839c910bc74..09c6a0bf389286e8caf70a556f5142f65908cf23 100644 (file)
@@ -533,11 +533,10 @@ struct dmar_domain {
                                        /* Domain ids per IOMMU. Use u16 since
                                         * domain ids are 16 bit wide according
                                         * to VT-d spec, section 9.3 */
-       unsigned int    auxd_refcnt;    /* Refcount of auxiliary attaching */
 
        bool has_iotlb_device;
        struct list_head devices;       /* all devices' list */
-       struct list_head auxd;          /* link to device's auxiliary list */
+       struct list_head subdevices;    /* all subdevices' list */
        struct iova_domain iovad;       /* iova's that belong to this domain */
 
        struct dma_pte  *pgd;           /* virtual address */
@@ -610,14 +609,21 @@ struct intel_iommu {
        struct dmar_drhd_unit *drhd;
 };
 
+/* Per subdevice private data */
+struct subdev_domain_info {
+       struct list_head link_phys;     /* link to phys device siblings */
+       struct list_head link_domain;   /* link to domain siblings */
+       struct device *pdev;            /* physical device derived from */
+       struct dmar_domain *domain;     /* aux-domain */
+       int users;                      /* user count */
+};
+
 /* PCI domain-device relationship */
 struct device_domain_info {
        struct list_head link;  /* link to domain siblings */
        struct list_head global; /* link to global list */
        struct list_head table; /* link to pasid table */
-       struct list_head auxiliary_domains; /* auxiliary domains
-                                            * attached to this device
-                                            */
+       struct list_head subdevices; /* subdevices sibling */
        u32 segment;            /* PCI segment number */
        u8 bus;                 /* PCI bus number */
        u8 devfn;               /* PCI devfn number */
@@ -758,6 +764,7 @@ struct intel_svm_dev {
        struct list_head list;
        struct rcu_head rcu;
        struct device *dev;
+       struct intel_iommu *iommu;
        struct svm_dev_ops *ops;
        struct iommu_sva sva;
        u32 pasid;
@@ -771,7 +778,6 @@ struct intel_svm {
        struct mmu_notifier notifier;
        struct mm_struct *mm;
 
-       struct intel_iommu *iommu;
        unsigned int flags;
        u32 pasid;
        int gpasid; /* In case that guest PASID is different from host PASID */
index 5e0655fb2a6f7767572f6b82275abfd306a4b386..fe1ae73ff8b574a8ee31e03b07f06cb6d5ca2473 100644 (file)
@@ -35,8 +35,12 @@ struct kunit_kasan_expectation {
 #define KASAN_SHADOW_INIT 0
 #endif
 
+#ifndef PTE_HWTABLE_PTRS
+#define PTE_HWTABLE_PTRS 0
+#endif
+
 extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
-extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
+extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
 extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
 extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
 extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
index a10e84707d820b54739f7dde0064c9334ac6c6e5..4e3037dc12048685390be4c91318646e69a4bb7d 100644 (file)
@@ -52,6 +52,25 @@ static inline void kcov_remote_start_usb(u64 id)
        kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_USB, id));
 }
 
+/*
+ * The softirq flavor of kcov_remote_*() functions is introduced as a temporary
+ * workaround for kcov's lack of nested remote coverage sections support in
+ * task context. Adding support for nested sections is tracked in:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=210337
+ */
+
+static inline void kcov_remote_start_usb_softirq(u64 id)
+{
+       if (in_serving_softirq())
+               kcov_remote_start_usb(id);
+}
+
+static inline void kcov_remote_stop_softirq(void)
+{
+       if (in_serving_softirq())
+               kcov_remote_stop();
+}
+
 #else
 
 static inline void kcov_task_init(struct task_struct *t) {}
@@ -66,6 +85,8 @@ static inline u64 kcov_common_handle(void)
 }
 static inline void kcov_remote_start_common(u64 id) {}
 static inline void kcov_remote_start_usb(u64 id) {}
+static inline void kcov_remote_start_usb_softirq(u64 id) {}
+static inline void kcov_remote_stop_softirq(void) {}
 
 #endif /* CONFIG_KCOV */
 #endif /* _LINUX_KCOV_H */
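
The intended call sites are USB giveback paths that can run in either task or softirq context; usage presumably mirrors the existing kcov_remote_start_usb() annotation (sketch):

    /* sketch: annotate a softirq-capable URB completion path */
    kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
    urb->complete(urb);
    kcov_remote_stop_softirq();

The in_serving_softirq() guard makes both wrappers no-ops in task context, so existing task-context callers are unaffected.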
index 85b5151911cfd098ca97d0397e440f737de1493a..4856706fbfeb4537a67e40cb54887584045a474f 100644 (file)
        })
 
 /* acceptable for old filesystems */
-static inline bool old_valid_dev(dev_t dev)
+static __always_inline bool old_valid_dev(dev_t dev)
 {
        return MAJOR(dev) < 256 && MINOR(dev) < 256;
 }
 
-static inline u16 old_encode_dev(dev_t dev)
+static __always_inline u16 old_encode_dev(dev_t dev)
 {
        return (MAJOR(dev) << 8) | MINOR(dev);
 }
 
-static inline dev_t old_decode_dev(u16 val)
+static __always_inline dev_t old_decode_dev(u16 val)
 {
        return MKDEV((val >> 8) & 255, val & 255);
 }
 
-static inline u32 new_encode_dev(dev_t dev)
+static __always_inline u32 new_encode_dev(dev_t dev)
 {
        unsigned major = MAJOR(dev);
        unsigned minor = MINOR(dev);
        return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
 }
 
-static inline dev_t new_decode_dev(u32 dev)
+static __always_inline dev_t new_decode_dev(u32 dev)
 {
        unsigned major = (dev & 0xfff00) >> 8;
        unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
        return MKDEV(major, minor);
 }
 
-static inline u64 huge_encode_dev(dev_t dev)
+static __always_inline u64 huge_encode_dev(dev_t dev)
 {
        return new_encode_dev(dev);
 }
 
-static inline dev_t huge_decode_dev(u64 dev)
+static __always_inline dev_t huge_decode_dev(u64 dev)
 {
        return new_decode_dev(dev);
 }
 
-static inline int sysv_valid_dev(dev_t dev)
+static __always_inline int sysv_valid_dev(dev_t dev)
 {
        return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18);
 }
 
-static inline u32 sysv_encode_dev(dev_t dev)
+static __always_inline u32 sysv_encode_dev(dev_t dev)
 {
        return MINOR(dev) | (MAJOR(dev) << 18);
 }
 
-static inline unsigned sysv_major(u32 dev)
+static __always_inline unsigned sysv_major(u32 dev)
 {
        return (dev >> 18) & 0x3fff;
 }
 
-static inline unsigned sysv_minor(u32 dev)
+static __always_inline unsigned sysv_minor(u32 dev)
 {
        return dev & 0x3ffff;
 }
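For context on what these now-always-inlined helpers compute: new_encode_dev() keeps the low 16 bits compatible with the old 8:8 layout while packing a 12-bit major and 20-bit minor. A round-trip illustration with invented values:

    dev_t dev = MKDEV(8, 0x12345);      /* major 8, 20-bit minor */
    u32 enc = new_encode_dev(dev);      /* 0x12300845: minor low byte | major << 8 | upper minor bits << 12 */
    BUG_ON(new_decode_dev(enc) != dev); /* the encoding round-trips losslessly */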
index 5d71e8a8500f5ed13872c19b01cf7a8af9d62c11..aca4dc037b70b7284e6f997264c4c66d2701d8ac 100644 (file)
@@ -35,6 +35,9 @@ struct mdiobb_ctrl {
        const struct mdiobb_ops *ops;
 };
 
+int mdiobb_read(struct mii_bus *bus, int phy, int reg);
+int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val);
+
 /* The returned bus is not yet registered with the phy layer. */
 struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl);
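Making mdiobb_read()/mdiobb_write() visible lets a MAC driver wrap the generic bit-banged accessors instead of duplicating them. A hypothetical wrapper, with the quirk hook invented for illustration:

    static int example_mdiobb_read(struct mii_bus *bus, int phy, int reg)
    {
            example_enable_mdio_clock(bus->priv);   /* hypothetical quirk hook */
            return mdiobb_read(bus, phy, reg);      /* generic bit-bang transfer */
    }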
 
index d827bd7f3bfe3a01e3c638ab43fa9673ab0b4cc3..eeb0b52203e929bb0c0e4f869abf395d29d5760d 100644 (file)
@@ -665,7 +665,7 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
 {
        struct mem_cgroup *memcg = page_memcg(page);
 
-       VM_WARN_ON_ONCE_PAGE(!memcg, page);
+       VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page);
        return mem_cgroup_lruvec(memcg, pgdat);
 }
 
index 8fbddec26eb8c994713ddc3cd2a0ebd69cd479d6..442c0160caab52ac07e94ddeab6581d46b5ad248 100644 (file)
@@ -1280,7 +1280,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         ece_support[0x1];
        u8         reserved_at_a4[0x7];
        u8         log_max_srq[0x5];
-       u8         reserved_at_b0[0x2];
+       u8         reserved_at_b0[0x1];
+       u8         uplink_follow[0x1];
        u8         ts_cqe_to_dest_cqn[0x1];
        u8         reserved_at_b3[0xd];
 
index 5299b90a6c403e3098d5402a60cd9318cde7eb72..ecdf8a8cd6aebe62f49ae45bf7aab623b9e5282b 100644 (file)
@@ -216,6 +216,13 @@ int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);
 int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);
+/*
+ * Any attempt to mark this function as static leads to build failure
+ * when CONFIG_DEBUG_INFO_BTF is enabled because __add_to_page_cache_locked()
+ * is referred to by BPF code. This must be visible for error injection.
+ */
+int __add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+               pgoff_t index, gfp_t gfp, void **shadowp);
 
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
@@ -2432,8 +2439,9 @@ extern int __meminit early_pfn_to_nid(unsigned long pfn);
 #endif
 
 extern void set_dma_reserve(unsigned long new_dma_reserve);
-extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
-               enum meminit_context, struct vmem_altmap *, int migratetype);
+extern void memmap_init_zone(unsigned long, int, unsigned long,
+               unsigned long, unsigned long, enum meminit_context,
+               struct vmem_altmap *, int migratetype);
 extern void setup_per_zone_wmarks(void);
 extern int __meminit init_per_zone_wmark_min(void);
 extern void mem_init(void);
index bf7966776c5577fd4a92a8c0837943b4f0aa5c54..505480217cf1a9b4f779b454d42bc78c464e189b 100644 (file)
@@ -163,8 +163,6 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
 static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
 #endif
 
-bool arm_pmu_irq_is_nmi(void);
-
 /* Internal functions only for core arm_pmu code */
 struct arm_pmu *armpmu_alloc(void);
 struct arm_pmu *armpmu_alloc_atomic(void);
index de08264113111e0b43c531838489a6e3085f5326..fd02c5fa60cb1de2f18a6b6688ab1bc1471c39a7 100644 (file)
@@ -86,6 +86,12 @@ void rcu_sched_clock_irq(int user);
 void rcu_report_dead(unsigned int cpu);
 void rcutree_migrate_callbacks(int cpu);
 
+#ifdef CONFIG_TASKS_RCU_GENERIC
+void rcu_init_tasks_generic(void);
+#else
+static inline void rcu_init_tasks_generic(void) { }
+#endif
+
 #ifdef CONFIG_RCU_STALL_COMMON
 void rcu_sysrq_start(void);
 void rcu_sysrq_end(void);
index 9874f6f675374010c657d36f6c1b85f1f3a76bda..1ac79bcee2bb71e4f3ae1a18b87dc55bd64554dc 100644 (file)
@@ -44,6 +44,9 @@
 #define SZ_2G                          0x80000000
 
 #define SZ_4G                          _AC(0x100000000, ULL)
+#define SZ_8G                          _AC(0x200000000, ULL)
+#define SZ_16G                         _AC(0x400000000, ULL)
+#define SZ_32G                         _AC(0x800000000, ULL)
 #define SZ_64T                         _AC(0x400000000000, ULL)
 
 #endif /* __LINUX_SIZES_H__ */
index 333bcdc39635bcfd2d2682891831bae613c6fa5e..5f60c9e907c9d8eae1e85ae0329838383e3325df 100644 (file)
@@ -366,7 +366,7 @@ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
 static inline bool skb_frag_must_loop(struct page *p)
 {
 #if defined(CONFIG_HIGHMEM)
-       if (PageHighMem(p))
+       if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p))
                return true;
 #endif
        return false;
@@ -1203,6 +1203,7 @@ struct skb_seq_state {
        struct sk_buff  *root_skb;
        struct sk_buff  *cur_skb;
        __u8            *frag_data;
+       __u32           frag_off;
 };
 
 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
index f3929aff39cf272fb73cd454516accc1146b9f4e..7688bc983de54a0c8e75ea585f5cea1749523b02 100644 (file)
@@ -251,6 +251,30 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
        static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 #endif /* __SYSCALL_DEFINEx */
 
+/* For split 64-bit arguments on 32-bit architectures */
+#ifdef __LITTLE_ENDIAN
+#define SC_ARG64(name) u32, name##_lo, u32, name##_hi
+#else
+#define SC_ARG64(name) u32, name##_hi, u32, name##_lo
+#endif
+#define SC_VAL64(type, name) ((type) name##_hi << 32 | name##_lo)
+
+#ifdef CONFIG_COMPAT
+#define SYSCALL32_DEFINE1 COMPAT_SYSCALL_DEFINE1
+#define SYSCALL32_DEFINE2 COMPAT_SYSCALL_DEFINE2
+#define SYSCALL32_DEFINE3 COMPAT_SYSCALL_DEFINE3
+#define SYSCALL32_DEFINE4 COMPAT_SYSCALL_DEFINE4
+#define SYSCALL32_DEFINE5 COMPAT_SYSCALL_DEFINE5
+#define SYSCALL32_DEFINE6 COMPAT_SYSCALL_DEFINE6
+#else
+#define SYSCALL32_DEFINE1 SYSCALL_DEFINE1
+#define SYSCALL32_DEFINE2 SYSCALL_DEFINE2
+#define SYSCALL32_DEFINE3 SYSCALL_DEFINE3
+#define SYSCALL32_DEFINE4 SYSCALL_DEFINE4
+#define SYSCALL32_DEFINE5 SYSCALL_DEFINE5
+#define SYSCALL32_DEFINE6 SYSCALL_DEFINE6
+#endif
+
 /*
  * Called before coming back to user-mode. Returning to user-mode with an
  * address limit different than USER_DS can allow to overwrite kernel memory.
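A sketch of how the pieces compose, using an invented syscall: SC_ARG64(pos) declares pos_lo/pos_hi in the endianness-correct register order, and SC_VAL64() reassembles the 64-bit value:

    SYSCALL32_DEFINE4(example_pread, unsigned int, fd, char __user *, buf,
                      size_t, count, SC_ARG64(pos))
    {
            /* pos_lo and pos_hi were declared by SC_ARG64(pos) above */
            return ksys_pread64(fd, buf, count, SC_VAL64(loff_t, pos));
    }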
index 88a7673894d5e624e7ffa51eb3c16d9043080e88..cfbfd6fe01dfad22f81c8090c4a9dfdcb1d6a28a 100644 (file)
@@ -81,6 +81,8 @@ struct usbnet {
 #              define EVENT_LINK_CHANGE        11
 #              define EVENT_SET_RX_MODE        12
 #              define EVENT_NO_IP_ALIGN        13
+       u32                     rx_speed;       /* in bps - NOT Mbps */
+       u32                     tx_speed;       /* in bps - NOT Mbps */
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
index 9a4bbccddc7f7fff5a86109eef2dd2f831a3b5ad..0d6f7ec860615c6146d677bda5a652ad205c03f2 100644 (file)
@@ -1756,7 +1756,7 @@ struct cfg80211_sar_specs {
 
 
 /**
- * @struct cfg80211_sar_chan_ranges - sar frequency ranges
+ * struct cfg80211_sar_freq_ranges - sar frequency ranges
  * @start_freq:  start range edge frequency
  * @end_freq:    end range edge frequency
  */
@@ -3972,6 +3972,8 @@ struct mgmt_frame_regs {
  *     This callback may sleep.
  * @reset_tid_config: Reset TID specific configuration for the peer, for the
  *     given TIDs. This callback may sleep.
+ *
+ * @set_sar_specs: Update the SAR (TX power) settings.
  */
 struct cfg80211_ops {
        int     (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -4929,6 +4931,7 @@ struct wiphy_iftype_akm_suites {
  * @max_data_retry_count: maximum supported per TID retry count for
  *     configuration through the %NL80211_TID_CONFIG_ATTR_RETRY_SHORT and
  *     %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
+ * @sar_capa: SAR control capabilities
  */
 struct wiphy {
        /* assign these fields before you register the wiphy */
index 7338b3865a2a3d278dc27c0167bba1b966bbda9f..111d7771b208150d4e6f445b2675648b4de0eb7d 100644 (file)
@@ -76,6 +76,8 @@ struct inet_connection_sock_af_ops {
  * @icsk_ext_hdr_len:     Network protocol overhead (IP/IPv6 options)
  * @icsk_ack:             Delayed ACK control data
  * @icsk_mtup;            MTU probing control data
+ * @icsk_probes_tstamp:    Probe timestamp (cleared by non-zero window ack)
+ * @icsk_user_timeout:    TCP_USER_TIMEOUT value
  */
 struct inet_connection_sock {
        /* inet_sock has to be the first member! */
@@ -129,6 +131,7 @@ struct inet_connection_sock {
 
                u32               probe_timestamp;
        } icsk_mtup;
+       u32                       icsk_probes_tstamp;
        u32                       icsk_user_timeout;
 
        u64                       icsk_ca_priv[104 / sizeof(u64)];
index d315740581f1ebc46de1e4180cc6ad9023cf1897..2bdbf62f4ecd7b4e7e62e45263176733fc5db02a 100644 (file)
@@ -3880,6 +3880,7 @@ enum ieee80211_reconfig_type {
  *     This callback may sleep.
  * @sta_set_4addr: Called to notify the driver when a station starts/stops using
  *     4-address mode
+ * @set_sar_specs: Update the SAR (TX power) settings.
  */
 struct ieee80211_ops {
        void (*tx)(struct ieee80211_hw *hw,
index fc455445f4b226373b78ff4cce4fe3de2cddb0c0..932f0d79d60cbbab60db73baddc8b650935b3bf6 100644 (file)
@@ -168,12 +168,14 @@ static inline void red_set_vars(struct red_vars *v)
        v->qcount       = -1;
 }
 
-static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
+static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_log)
 {
        if (fls(qth_min) + Wlog > 32)
                return false;
        if (fls(qth_max) + Wlog > 32)
                return false;
+       if (Scell_log >= 32)
+               return false;
        if (qth_max < qth_min)
                return false;
        return true;
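Scell_log is later used as a shift count (e.g. 255 << Scell_log when sizing the idle-time cell table), so values of 32 or more are undefined behavior on a u32; qdisc setup paths validate the whole tuple before touching any state, roughly like this (field names follow struct tc_red_qopt):

    if (!red_check_params(ctl->qth_min, ctl->qth_max,
                          ctl->Wlog, ctl->Scell_log))
            return -EINVAL;     /* reject the config up front */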
index bdc4323ce53c957c30809b0773b2d558dce555b7..129d200bccb46581f9959a8ee553acbed94466cf 100644 (file)
@@ -1921,10 +1921,13 @@ static inline void sk_set_txhash(struct sock *sk)
        sk->sk_txhash = net_tx_rndhash();
 }
 
-static inline void sk_rethink_txhash(struct sock *sk)
+static inline bool sk_rethink_txhash(struct sock *sk)
 {
-       if (sk->sk_txhash)
+       if (sk->sk_txhash) {
                sk_set_txhash(sk);
+               return true;
+       }
+       return false;
 }
 
 static inline struct dst_entry *
@@ -1947,12 +1950,10 @@ sk_dst_get(struct sock *sk)
        return dst;
 }
 
-static inline void dst_negative_advice(struct sock *sk)
+static inline void __dst_negative_advice(struct sock *sk)
 {
        struct dst_entry *ndst, *dst = __sk_dst_get(sk);
 
-       sk_rethink_txhash(sk);
-
        if (dst && dst->ops->negative_advice) {
                ndst = dst->ops->negative_advice(dst);
 
@@ -1964,6 +1965,12 @@ static inline void dst_negative_advice(struct sock *sk)
        }
 }
 
+static inline void dst_negative_advice(struct sock *sk)
+{
+       sk_rethink_txhash(sk);
+       __dst_negative_advice(sk);
+}
+
 static inline void
 __sk_dst_set(struct sock *sk, struct dst_entry *dst)
 {
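With the rethink step separated out, a caller can reroll the tx hash on its own, learn whether anything changed, and keep the cached route. A hypothetical timeout-path sketch:

    static void example_on_timeout(struct sock *sk)
    {
            /* Steer the flow onto a possibly different path; keep the dst. */
            if (sk_rethink_txhash(sk))
                    pr_debug("txhash rerolled for sk %p\n", sk);
    }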
index 4f4e93bf814c3e66b392f4bca8dd6e565685f0bc..cc17bc957548257602a5d9a6cdd59bb704f4e278 100644 (file)
@@ -58,10 +58,6 @@ struct xdp_sock {
 
        struct xsk_queue *tx ____cacheline_aligned_in_smp;
        struct list_head tx_list;
-       /* Mutual exclusion of NAPI TX thread and sendmsg error paths
-        * in the SKB destructor callback.
-        */
-       spinlock_t tx_completion_lock;
        /* Protects generic receive. */
        spinlock_t rx_lock;
 
index 01755b838c745079c53b6daefd7a44322ef1d7c3..eaa8386dbc630b3d30c9b8b1f76d4e18a538d9f5 100644 (file)
@@ -73,6 +73,11 @@ struct xsk_buff_pool {
        bool dma_need_sync;
        bool unaligned;
        void *addrs;
+       /* Mutual exclusion of the completion ring in SKB mode. Two cases to protect:
+        * the NAPI TX thread vs. sendmsg error paths in the SKB destructor callback,
+        * and sockets sharing a single cq for the same netdev and queue id.
+        */
+       spinlock_t cq_lock;
        struct xdp_buff_xsk *free_heads[];
 };
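An access sketch under the stated assumptions (irqsave form, since the SKB destructor can fire with interrupts disabled; the surrounding function is invented):

    unsigned long flags;

    spin_lock_irqsave(&pool->cq_lock, flags);
    /* reserve or submit completion-ring entries for this skb */
    spin_unlock_irqrestore(&pool->cq_lock, flags);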
 
diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h
deleted file mode 100644 (file)
index 8c18dc6..0000000
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef SOC_NPS_COMMON_H
-#define SOC_NPS_COMMON_H
-
-#ifdef CONFIG_SMP
-#define NPS_IPI_IRQ                                    5
-#endif
-
-#define NPS_HOST_REG_BASE                      0xF6000000
-
-#define NPS_MSU_BLKID                          0x018
-
-#define CTOP_INST_RSPI_GIC_0_R12               0x3C56117E
-#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST     0x5B60
-#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM     0x00010422
-
-#ifndef AUX_IENABLE
-#define AUX_IENABLE                            0x40c
-#endif
-
-#define CTOP_AUX_IACK                          (0xFFFFF800 + 0x088)
-
-#ifndef __ASSEMBLY__
-
-/* In order to increase compilation test coverage */
-#ifdef CONFIG_ARC
-static inline void nps_ack_gic(void)
-{
-       __asm__ __volatile__ (
-       "       .word %0\n"
-       :
-       : "i"(CTOP_INST_RSPI_GIC_0_R12)
-       : "memory");
-}
-#else
-static inline void nps_ack_gic(void) { }
-#define write_aux_reg(r, v)
-#define read_aux_reg(r) 0
-#endif
-
-/* CPU global ID */
-struct global_id {
-       union {
-               struct {
-#ifdef CONFIG_EZNPS_MTM_EXT
-                       u32 __reserved:20, cluster:4, core:4, thread:4;
-#else
-                       u32 __reserved:24, cluster:4, core:4;
-#endif
-               };
-               u32 value;
-       };
-};
-
-/*
- * Convert logical to physical CPU IDs
- *
- * The conversion swap bits 1 and 2 of cluster id (out of 4 bits)
- * Now quad of logical clusters id's are adjacent physically,
- * and not like the id's physically came with each cluster.
- * Below table is 4x4 mesh of core clusters as it layout on chip.
- * Cluster ids are in format: logical (physical)
- *
- *    -----------------   ------------------
- * 3 |  5 (3)   7 (7)  | | 13 (11)   15 (15)|
- *
- * 2 |  4 (2)   6 (6)  | | 12 (10)   14 (14)|
- *    -----------------   ------------------
- * 1 |  1 (1)   3 (5)  | |  9  (9)   11 (13)|
- *
- * 0 |  0 (0)   2 (4)  | |  8  (8)   10 (12)|
- *    -----------------   ------------------
- *       0       1            2        3
- */
-static inline int nps_cluster_logic_to_phys(int cluster)
-{
-#ifdef __arc__
-        __asm__ __volatile__(
-       "       mov r3,%0\n"
-       "       .short %1\n"
-       "       .word %2\n"
-       "       mov %0,r3\n"
-       : "+r"(cluster)
-       : "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST),
-         "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM)
-       : "r3");
-#endif
-
-       return cluster;
-}
-
-#define NPS_CPU_TO_CLUSTER_NUM(cpu) \
-       ({ struct global_id gid; gid.value = cpu; \
-               nps_cluster_logic_to_phys(gid.cluster); })
-
-struct nps_host_reg_address {
-       union {
-               struct {
-                       u32 base:8, cl_x:4, cl_y:4,
-                       blkid:6, reg:8, __reserved:2;
-               };
-               u32 value;
-       };
-};
-
-struct nps_host_reg_address_non_cl {
-       union {
-               struct {
-                       u32 base:7, blkid:11, reg:12, __reserved:2;
-               };
-               u32 value;
-       };
-};
-
-static inline void *nps_host_reg_non_cl(u32 blkid, u32 reg)
-{
-       struct nps_host_reg_address_non_cl reg_address;
-
-       reg_address.value = NPS_HOST_REG_BASE;
-       reg_address.blkid = blkid;
-       reg_address.reg = reg;
-
-       return (void *)reg_address.value;
-}
-
-static inline void *nps_host_reg(u32 cpu, u32 blkid, u32 reg)
-{
-       struct nps_host_reg_address reg_address;
-       u32 cl = NPS_CPU_TO_CLUSTER_NUM(cpu);
-
-       reg_address.value = NPS_HOST_REG_BASE;
-       reg_address.cl_x  = (cl >> 2) & 0x3;
-       reg_address.cl_y  = cl & 0x3;
-       reg_address.blkid = blkid;
-       reg_address.reg   = reg;
-
-       return (void *)reg_address.value;
-}
-#endif /* __ASSEMBLY__ */
-
-#endif /* SOC_NPS_COMMON_H */
diff --git a/include/soc/nps/mtm.h b/include/soc/nps/mtm.h
deleted file mode 100644 (file)
index d2f5e7e..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef SOC_NPS_MTM_H
-#define SOC_NPS_MTM_H
-
-#define CTOP_INST_HWSCHD_OFF_R3                 0x3B6F00BF
-#define CTOP_INST_HWSCHD_RESTORE_R3             0x3E6F70C3
-
-static inline void hw_schd_save(unsigned int *flags)
-{
-       __asm__ __volatile__(
-       "       .word %1\n"
-       "       st r3,[%0]\n"
-       :
-       : "r"(flags), "i"(CTOP_INST_HWSCHD_OFF_R3)
-       : "r3", "memory");
-}
-
-static inline void hw_schd_restore(unsigned int flags)
-{
-       __asm__ __volatile__(
-       "       mov r3, %0\n"
-       "       .word %1\n"
-       :
-       : "r"(flags), "i"(CTOP_INST_HWSCHD_RESTORE_R3)
-       : "r3");
-}
-
-#endif /* SOC_NPS_MTM_H */
index 4eef374d4413e2ce79254846fafda749be3a05ce..4a5cc8c64be3470b3ecf18b6d7aec66233089f07 100644 (file)
@@ -231,6 +231,7 @@ enum afs_file_error {
        afs_file_error_dir_bad_magic,
        afs_file_error_dir_big,
        afs_file_error_dir_missing_page,
+       afs_file_error_dir_name_too_long,
        afs_file_error_dir_over_end,
        afs_file_error_dir_small,
        afs_file_error_dir_unmarked_ext,
@@ -488,6 +489,7 @@ enum afs_cb_break_reason {
        EM(afs_file_error_dir_bad_magic,        "DIR_BAD_MAGIC")        \
        EM(afs_file_error_dir_big,              "DIR_BIG")              \
        EM(afs_file_error_dir_missing_page,     "DIR_MISSING_PAGE")     \
+       EM(afs_file_error_dir_name_too_long,    "DIR_NAME_TOO_LONG")    \
        EM(afs_file_error_dir_over_end,         "DIR_ENT_OVER_END")     \
        EM(afs_file_error_dir_small,            "DIR_SMALL")            \
        EM(afs_file_error_dir_unmarked_ext,     "DIR_UNMARKED_EXT")     \
index 58994e01302216d9b935f6ac3a3bb4e7fb794077..6f89c27265f5895587416a2ff1aae928c18543c0 100644 (file)
@@ -1424,13 +1424,61 @@ TRACE_EVENT(rpcb_unregister,
        )
 );
 
+/* Record an xdr_buf containing a fully-formed RPC message */
+DECLARE_EVENT_CLASS(svc_xdr_msg_class,
+       TP_PROTO(
+               const struct xdr_buf *xdr
+       ),
+
+       TP_ARGS(xdr),
+
+       TP_STRUCT__entry(
+               __field(u32, xid)
+               __field(const void *, head_base)
+               __field(size_t, head_len)
+               __field(const void *, tail_base)
+               __field(size_t, tail_len)
+               __field(unsigned int, page_len)
+               __field(unsigned int, msg_len)
+       ),
+
+       TP_fast_assign(
+               __be32 *p = (__be32 *)xdr->head[0].iov_base;
+
+               __entry->xid = be32_to_cpu(*p);
+               __entry->head_base = p;
+               __entry->head_len = xdr->head[0].iov_len;
+               __entry->tail_base = xdr->tail[0].iov_base;
+               __entry->tail_len = xdr->tail[0].iov_len;
+               __entry->page_len = xdr->page_len;
+               __entry->msg_len = xdr->len;
+       ),
+
+       TP_printk("xid=0x%08x head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+               __entry->xid,
+               __entry->head_base, __entry->head_len, __entry->page_len,
+               __entry->tail_base, __entry->tail_len, __entry->msg_len
+       )
+);
+
+#define DEFINE_SVCXDRMSG_EVENT(name)                                   \
+               DEFINE_EVENT(svc_xdr_msg_class,                         \
+                               svc_xdr_##name,                         \
+                               TP_PROTO(                               \
+                                       const struct xdr_buf *xdr       \
+                               ),                                      \
+                               TP_ARGS(xdr))
+
+DEFINE_SVCXDRMSG_EVENT(recvfrom);
+
+/* Record an xdr_buf containing arbitrary data, tagged with an XID */
 DECLARE_EVENT_CLASS(svc_xdr_buf_class,
        TP_PROTO(
-               const struct svc_rqst *rqst,
+               __be32 xid,
                const struct xdr_buf *xdr
        ),
 
-       TP_ARGS(rqst, xdr),
+       TP_ARGS(xid, xdr),
 
        TP_STRUCT__entry(
                __field(u32, xid)
@@ -1443,7 +1491,7 @@ DECLARE_EVENT_CLASS(svc_xdr_buf_class,
        ),
 
        TP_fast_assign(
-               __entry->xid = be32_to_cpu(rqst->rq_xid);
+               __entry->xid = be32_to_cpu(xid);
                __entry->head_base = xdr->head[0].iov_base;
                __entry->head_len = xdr->head[0].iov_len;
                __entry->tail_base = xdr->tail[0].iov_base;
@@ -1463,12 +1511,11 @@ DECLARE_EVENT_CLASS(svc_xdr_buf_class,
                DEFINE_EVENT(svc_xdr_buf_class,                         \
                                svc_xdr_##name,                         \
                                TP_PROTO(                               \
-                                       const struct svc_rqst *rqst,    \
+                                       __be32 xid,                     \
                                        const struct xdr_buf *xdr       \
                                ),                                      \
-                               TP_ARGS(rqst, xdr))
+                               TP_ARGS(xid, xdr))
 
-DEFINE_SVCXDRBUF_EVENT(recvfrom);
 DEFINE_SVCXDRBUF_EVENT(sendto);
 
 /*
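The resulting call-site shapes, sketched from the new prototypes: the receive path hands over just the buf (the msg class reads the XID out of the head itself), while the send path supplies the XID explicitly:

    trace_svc_xdr_recvfrom(&rqstp->rq_arg);                 /* msg class */
    trace_svc_xdr_sendto(rqstp->rq_xid, &rqstp->rq_res);    /* buf class */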
index 52e8bcb339811c93cfbc50588fa1e442f1d7a4bb..cf7399f03b712397702ba6a6c69bd1732f395cbf 100644 (file)
@@ -213,7 +213,7 @@ struct cache_sb_disk {
                __le16          keys;
        };
        __le64                  d[SB_JOURNAL_BUCKETS];  /* journal buckets */
-       __le16                  bucket_size_hi;
+       __le16                  obso_bucket_size_hi;    /* obsoleted */
 };
 
 /*
index 874cc12a34d9daa544c143e6eb2e1ebd60628e23..82708c6db432251e7a680c3dc657cc048c6f58e9 100644 (file)
@@ -75,8 +75,9 @@ struct rtnl_link_stats {
  *
  * @rx_dropped: Number of packets received but not processed,
  *   e.g. due to lack of resources or unsupported protocol.
- *   For hardware interfaces this counter should not include packets
- *   dropped by the device which are counted separately in
+ *   For hardware interfaces this counter may include packets discarded
+ *   due to L2 address filtering but should not include packets dropped
+ *   by the device due to buffer exhaustion which are counted separately in
  *   @rx_missed_errors (since procfs folds those two counters together).
  *
  * @tx_dropped: Number of packets dropped on their way to transmission,
index 886802b8ffba3617f64b47f7f6f452335fdfa23e..374c67875cdbd5c60eb15c1b932e7fcdc7f9fa7d 100644 (file)
@@ -251,6 +251,7 @@ struct kvm_hyperv_exit {
 #define KVM_EXIT_X86_RDMSR        29
 #define KVM_EXIT_X86_WRMSR        30
 #define KVM_EXIT_DIRTY_RING_FULL  31
+#define KVM_EXIT_AP_RESET_HOLD    32
 
 /* For KVM_EXIT_INTERNAL_ERROR */
 /* Emulate instruction failed. */
@@ -573,6 +574,7 @@ struct kvm_vapic_addr {
 #define KVM_MP_STATE_CHECK_STOP        6
 #define KVM_MP_STATE_OPERATING         7
 #define KVM_MP_STATE_LOAD              8
+#define KVM_MP_STATE_AP_RESET_HOLD     9
 
 struct kvm_mp_state {
        __u32 mp_state;
index 28b6ee53305f4a8cf5be34326fa5dd6f4550951d..b1633e7ba529694e20056148f63897ca2baa39c7 100644 (file)
@@ -293,6 +293,7 @@ enum nft_rule_compat_attributes {
  * @NFT_SET_EVAL: set can be updated from the evaluation path
  * @NFT_SET_OBJECT: set contains stateful objects
  * @NFT_SET_CONCAT: set contains a concatenation
+ * @NFT_SET_EXPR: set contains expressions
  */
 enum nft_set_flags {
        NFT_SET_ANONYMOUS               = 0x1,
@@ -303,6 +304,7 @@ enum nft_set_flags {
        NFT_SET_EVAL                    = 0x20,
        NFT_SET_OBJECT                  = 0x40,
        NFT_SET_CONCAT                  = 0x80,
+       NFT_SET_EXPR                    = 0x100,
 };
 
 /**
@@ -706,6 +708,7 @@ enum nft_dynset_ops {
 
 enum nft_dynset_flags {
        NFT_DYNSET_F_INV        = (1 << 0),
+       NFT_DYNSET_F_EXPR       = (1 << 1),
 };
 
 /**
index 8dbecb3ad03684f69b14b4b2b7637a3d73e9f690..1cc5ce0ae062ec179d7badb3c9006dbef689cb21 100644 (file)
@@ -116,7 +116,7 @@ struct pppol2tp_ioc_stats {
 #define PPPIOCGCHAN    _IOR('t', 55, int)      /* get ppp channel number */
 #define PPPIOCGL2TPSTATS _IOR('t', 54, struct pppol2tp_ioc_stats)
 #define PPPIOCBRIDGECHAN _IOW('t', 53, int)    /* bridge one channel to another */
-#define PPPIOCUNBRIDGECHAN _IO('t', 54)        /* unbridge channel */
+#define PPPIOCUNBRIDGECHAN _IO('t', 52)        /* unbridge channel */
 
 #define SIOCGPPPSTATS   (SIOCDEVPRIVATE + 0)
 #define SIOCGPPPVER     (SIOCDEVPRIVATE + 1)   /* NEVER change this!! */
index 8c15a7d336a0d1889d821f3f5ddeb583fee5f212..dba3827c43ca474a1ca07ec84dbbbf5206e9f494 100644 (file)
@@ -279,6 +279,7 @@ enum hl_device_status {
  * HL_INFO_CLK_THROTTLE_REASON - Retrieve clock throttling reason
  * HL_INFO_SYNC_MANAGER  - Retrieve sync manager info per dcore
  * HL_INFO_TOTAL_ENERGY  - Retrieve total energy consumption
+ * HL_INFO_PLL_FREQUENCY - Retrieve PLL frequency
  */
 #define HL_INFO_HW_IP_INFO             0
 #define HL_INFO_HW_EVENTS              1
@@ -425,6 +426,8 @@ struct hl_info_sync_manager {
  * @ctx_device_in_reset_drop_cnt: context dropped due to device in reset
  * @total_max_cs_in_flight_drop_cnt: total dropped due to maximum CS in-flight
  * @ctx_max_cs_in_flight_drop_cnt: context dropped due to maximum CS in-flight
+ * @total_validation_drop_cnt: total dropped due to validation error
+ * @ctx_validation_drop_cnt: context dropped due to validation error
  */
 struct hl_info_cs_counters {
        __u64 total_out_of_mem_drop_cnt;
@@ -437,6 +440,8 @@ struct hl_info_cs_counters {
        __u64 ctx_device_in_reset_drop_cnt;
        __u64 total_max_cs_in_flight_drop_cnt;
        __u64 ctx_max_cs_in_flight_drop_cnt;
+       __u64 total_validation_drop_cnt;
+       __u64 ctx_validation_drop_cnt;
 };
 
 enum gaudi_dcores {
index 00c7235ae93e7daae3ce106f76fc5dfce1cd1bdd..2c43b0ef1e4d504a652d0d574d5b6e41fa107091 100644 (file)
@@ -192,7 +192,7 @@ void xs_suspend_cancel(void);
 
 struct work_struct;
 
-void xenbus_probe(struct work_struct *);
+void xenbus_probe(void);
 
 #define XENBUS_IS_ERR_READ(str) ({                     \
        if (!IS_ERR(str) && strlen(str) == 0) {         \
index 6feee7f11eafca4de9fb4dd2101edeb4d1125bef..c68d784376ca1058d58e3a6dfb664420c6ded461 100644 (file)
@@ -1480,14 +1480,8 @@ void __init console_on_rootfs(void)
        struct file *file = filp_open("/dev/console", O_RDWR, 0);
 
        if (IS_ERR(file)) {
-               pr_err("Warning: unable to open an initial console. Fallback to ttynull.\n");
-               register_ttynull_console();
-
-               file = filp_open("/dev/console", O_RDWR, 0);
-               if (IS_ERR(file)) {
-                       pr_err("Warning: Failed to add ttynull console. No stdin, stdout, and stderr for the init process!\n");
-                       return;
-               }
+               pr_err("Warning: unable to open an initial console.\n");
+               return;
        }
        init_dup(file);
        init_dup(file);
@@ -1518,6 +1512,7 @@ static noinline void __init kernel_init_freeable(void)
 
        init_mm_internals();
 
+       rcu_init_tasks_generic();
        do_pre_smp_initcalls();
        lockup_detector_init();
 
index 6edff97ad594bd5d6e98a8971723a4c07436db1b..2f0597320b6d279871834c2deea3a662736b7373 100644 (file)
@@ -176,14 +176,14 @@ BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode,
         * bpf_local_storage_update expects the owner to have a
         * valid storage pointer.
         */
-       if (!inode_storage_ptr(inode))
+       if (!inode || !inode_storage_ptr(inode))
                return (unsigned long)NULL;
 
        sdata = inode_storage_lookup(inode, map, true);
        if (sdata)
                return (unsigned long)sdata->data;
 
-       /* This helper must only called from where the inode is gurranteed
+       /* This helper must only be called from where the inode is guaranteed
         * to have a refcount and cannot be freed.
         */
        if (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) {
@@ -200,7 +200,10 @@ BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode,
 BPF_CALL_2(bpf_inode_storage_delete,
           struct bpf_map *, map, struct inode *, inode)
 {
-       /* This helper must only called from where the inode is gurranteed
+       if (!inode)
+               return -EINVAL;
+
+       /* This helper must only be called from where the inode is guaranteed
         * to have a refcount and cannot be freed.
         */
        return inode_storage_delete(inode, map);
index 4ef1959a78f27fcc610b1d31dcc6f4ca489f2f72..e0da0258b732dd318304a6357984ec6f50947f42 100644 (file)
@@ -218,7 +218,7 @@ BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
         * bpf_local_storage_update expects the owner to have a
         * valid storage pointer.
         */
-       if (!task_storage_ptr(task))
+       if (!task || !task_storage_ptr(task))
                return (unsigned long)NULL;
 
        sdata = task_storage_lookup(task, map, true);
@@ -243,6 +243,9 @@ BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
 BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
           task)
 {
+       if (!task)
+               return -EINVAL;
+
        /* This helper must only be called from places where the lifetime of the task
         * is guaranteed. Either by being refcounted or by being protected
         * by an RCU read-side critical section.
index 8d6bdb4f4d61819c873a437a8d3ef50eebe767fc..84a36ee4a4c20fa894a2b82cf05820e2576e7a03 100644 (file)
@@ -4172,7 +4172,7 @@ static int btf_parse_hdr(struct btf_verifier_env *env)
                return -ENOTSUPP;
        }
 
-       if (btf_data_size == hdr->hdr_len) {
+       if (!btf->base_btf && btf_data_size == hdr->hdr_len) {
                btf_verifier_log(env, "No data");
                return -EINVAL;
        }
index 6ec088a96302f980ee7e56feaf626ad078d03a77..96555a8a2c545435aee44fd2fe4bbc37532357c5 100644 (file)
@@ -1391,12 +1391,13 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
                if (ctx.optlen != 0) {
                        *optlen = ctx.optlen;
                        *kernel_optval = ctx.optval;
+                       /* export and don't free sockopt buf */
+                       return 0;
                }
        }
 
 out:
-       if (ret)
-               sockopt_free_buf(&ctx);
+       sockopt_free_buf(&ctx);
        return ret;
 }
 
index 7e848200cd268a0f9ed063f0b641d3c355787013..c1ac7f964bc997925fd427f5192168829d812e5d 100644 (file)
@@ -152,6 +152,7 @@ static void htab_init_buckets(struct bpf_htab *htab)
                        lockdep_set_class(&htab->buckets[i].lock,
                                          &htab->lockdep_key);
                }
+               cond_resched();
        }
 }
 
index bd8a3183d0302efd7147e84b30054cf9aa6c10e5..41ca280b1dc19428d626ef16ed8db8eab58892df 100644 (file)
@@ -108,7 +108,7 @@ BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
 }
 
 const struct bpf_func_proto bpf_map_peek_elem_proto = {
-       .func           = bpf_map_pop_elem,
+       .func           = bpf_map_peek_elem,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
index 4caf06fe41524f81d204ba650f4791c1bac86156..e5999d86c76ea17ca49a4cf8851cf51cc20d9a3d 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/fs.h>
 #include <linux/license.h>
 #include <linux/filter.h>
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/idr.h>
 #include <linux/cred.h>
@@ -2713,7 +2712,6 @@ out_unlock:
 out_put_prog:
        if (tgt_prog_fd && tgt_prog)
                bpf_prog_put(tgt_prog);
-       bpf_prog_put(prog);
        return err;
 }
 
@@ -2826,7 +2824,10 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
                        tp_name = prog->aux->attach_func_name;
                        break;
                }
-               return bpf_tracing_prog_attach(prog, 0, 0);
+               err = bpf_tracing_prog_attach(prog, 0, 0);
+               if (err >= 0)
+                       return err;
+               goto out_put_prog;
        case BPF_PROG_TYPE_RAW_TRACEPOINT:
        case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
                if (strncpy_from_user(buf,
index e73c075930245938b7e6d3cbccbc4428928a7faf..175b7b42bfc46da2f079c859b6742e270efa448c 100644 (file)
@@ -37,7 +37,7 @@ retry:
                if (!task) {
                        ++*tid;
                        goto retry;
-               } else if (skip_if_dup_files && task->tgid != task->pid &&
+               } else if (skip_if_dup_files && !thread_group_leader(task) &&
                           task->files == task->group_leader->files) {
                        put_task_struct(task);
                        task = NULL;
@@ -151,13 +151,14 @@ again:
                curr_task = info->task;
                curr_fd = info->fd;
        } else {
-               curr_task = task_seq_get_next(ns, &curr_tid, true);
-               if (!curr_task) {
-                       info->task = NULL;
-                       return NULL;
-               }
-
-               /* set info->task and info->tid */
+               curr_task = task_seq_get_next(ns, &curr_tid, true);
+               if (!curr_task) {
+                       info->task = NULL;
+                       info->tid = curr_tid;
+                       return NULL;
+               }
+
+               /* set info->task and info->tid */
                info->task = curr_task;
                if (curr_tid == info->tid) {
                        curr_fd = info->fd;
index 17270b8404f173ff8e164fa542d11fa73b571cf9..e7368c5eacb7bdc508099841bc447612624faa7c 100644 (file)
@@ -2217,6 +2217,8 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
        case PTR_TO_RDWR_BUF:
        case PTR_TO_RDWR_BUF_OR_NULL:
        case PTR_TO_PERCPU_BTF_ID:
+       case PTR_TO_MEM:
+       case PTR_TO_MEM_OR_NULL:
                return true;
        default:
                return false;
@@ -5311,7 +5313,7 @@ static bool signed_add_overflows(s64 a, s64 b)
        return res < a;
 }
 
-static bool signed_add32_overflows(s64 a, s64 b)
+static bool signed_add32_overflows(s32 a, s32 b)
 {
        /* Do the add in u32, where overflow is well-defined */
        s32 res = (s32)((u32)a + (u32)b);
@@ -5321,7 +5323,7 @@ static bool signed_add32_overflows(s64 a, s64 b)
        return res < a;
 }
 
-static bool signed_sub_overflows(s32 a, s32 b)
+static bool signed_sub_overflows(s64 a, s64 b)
 {
        /* Do the sub in u64, where overflow is well-defined */
        s64 res = (s64)((u64)a - (u64)b);
@@ -5333,7 +5335,7 @@ static bool signed_sub_overflows(s32 a, s32 b)
 
 static bool signed_sub32_overflows(s32 a, s32 b)
 {
-       /* Do the sub in u64, where overflow is well-defined */
+       /* Do the sub in u32, where overflow is well-defined */
        s32 res = (s32)((u32)a - (u32)b);
 
        if (b < 0)
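The corrected prototypes matter because the verifier widens a register's bounds whenever overflow is possible. The usual calling pattern, simplified from the adjust_scalar_min_max_vals() style:

    if (signed_add_overflows(dst_reg->smin_value, src_reg->smin_value) ||
        signed_add_overflows(dst_reg->smax_value, src_reg->smax_value)) {
            dst_reg->smin_value = S64_MIN;  /* bounds unknowable after overflow */
            dst_reg->smax_value = S64_MAX;
    } else {
            dst_reg->smin_value += src_reg->smin_value;
            dst_reg->smax_value += src_reg->smax_value;
    }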
index 191c329e482ad97952445f4544f18efc83b26cdd..32596fdbcd5b8ecc77374a4bc80bf004f37a314a 100644 (file)
@@ -908,6 +908,8 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
        opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
        if (opt == -ENOPARAM) {
                if (strcmp(param->key, "source") == 0) {
+                       if (fc->source)
+                               return invalf(fc, "Multiple sources not supported");
                        fc->source = param->string;
                        param->string = NULL;
                        return 0;
index fefa21981027da293d5d3e213bc8949f3913cfc7..613845769103cd36ab9ef9340b9dd90f5c20f5f9 100644 (file)
@@ -244,7 +244,7 @@ bool cgroup_ssid_enabled(int ssid)
  *
  * The default hierarchy is the v2 interface of cgroup and this function
  * can be used to test whether a cgroup is on the default hierarchy for
- * cases where a subsystem should behave differnetly depending on the
+ * cases where a subsystem should behave differently depending on the
  * interface version.
  *
  * List of changed behaviors:
@@ -262,7 +262,7 @@ bool cgroup_ssid_enabled(int ssid)
  *   "cgroup.procs" instead.
  *
  * - "cgroup.procs" is not sorted.  pids will be unique unless they got
- *   recycled inbetween reads.
+ *   recycled in-between reads.
  *
  * - "release_agent" and "notify_on_release" are removed.  Replacement
  *   notification mechanism will be implemented.
@@ -342,7 +342,7 @@ static bool cgroup_is_mixable(struct cgroup *cgrp)
        return !cgroup_parent(cgrp);
 }
 
-/* can @cgrp become a thread root? should always be true for a thread root */
+/* can @cgrp become a thread root? Should always be true for a thread root */
 static bool cgroup_can_be_thread_root(struct cgroup *cgrp)
 {
        /* mixables don't care */
@@ -527,7 +527,7 @@ static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
  * the root css is returned, so this function always returns a valid css.
  *
  * The returned css is not guaranteed to be online, and therefore it is the
- * callers responsiblity to tryget a reference for it.
+ * caller's responsibility to try to get a reference for it.
  */
 struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
                                         struct cgroup_subsys *ss)
@@ -699,7 +699,7 @@ EXPORT_SYMBOL_GPL(of_css);
                        ;                                               \
                else
 
-/* walk live descendants in preorder */
+/* walk live descendants in pre-order */
 #define cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)         \
        css_for_each_descendant_pre((d_css), cgroup_css((cgrp), NULL))  \
                if (({ lockdep_assert_held(&cgroup_mutex);              \
@@ -933,7 +933,7 @@ void put_css_set_locked(struct css_set *cset)
 
        WARN_ON_ONCE(!list_empty(&cset->threaded_csets));
 
-       /* This css_set is dead. unlink it and release cgroup and css refs */
+       /* This css_set is dead. Unlink it and release cgroup and css refs */
        for_each_subsys(ss, ssid) {
                list_del(&cset->e_cset_node[ssid]);
                css_put(cset->subsys[ssid]);
@@ -1058,7 +1058,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset,
 
        /*
         * Build the set of subsystem state objects that we want to see in the
-        * new css_set. while subsystems can change globally, the entries here
+        * new css_set. While subsystems can change globally, the entries here
         * won't change, so no need for locking.
         */
        for_each_subsys(ss, i) {
@@ -1148,7 +1148,7 @@ static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
 
        /*
         * Always add links to the tail of the lists so that the lists are
-        * in choronological order.
+        * in chronological order.
         */
        list_move_tail(&link->cset_link, &cgrp->cset_links);
        list_add_tail(&link->cgrp_link, &cset->cgrp_links);
@@ -3654,7 +3654,7 @@ static ssize_t cgroup_freeze_write(struct kernfs_open_file *of,
 
 static int cgroup_file_open(struct kernfs_open_file *of)
 {
-       struct cftype *cft = of->kn->priv;
+       struct cftype *cft = of_cft(of);
 
        if (cft->open)
                return cft->open(of);
@@ -3663,7 +3663,7 @@ static int cgroup_file_open(struct kernfs_open_file *of)
 
 static void cgroup_file_release(struct kernfs_open_file *of)
 {
-       struct cftype *cft = of->kn->priv;
+       struct cftype *cft = of_cft(of);
 
        if (cft->release)
                cft->release(of);
@@ -3674,7 +3674,7 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
 {
        struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
        struct cgroup *cgrp = of->kn->parent->priv;
-       struct cftype *cft = of->kn->priv;
+       struct cftype *cft = of_cft(of);
        struct cgroup_subsys_state *css;
        int ret;
 
@@ -3724,7 +3724,7 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
 
 static __poll_t cgroup_file_poll(struct kernfs_open_file *of, poll_table *pt)
 {
-       struct cftype *cft = of->kn->priv;
+       struct cftype *cft = of_cft(of);
 
        if (cft->poll)
                return cft->poll(of, pt);
@@ -4134,7 +4134,7 @@ struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
         * implies that if we observe !CSS_RELEASED on @pos in this RCU
         * critical section, the one pointed to by its next pointer is
         * guaranteed to not have finished its RCU grace period even if we
-        * have dropped rcu_read_lock() inbetween iterations.
+        * have dropped rcu_read_lock() in-between iterations.
         *
         * If @pos has CSS_RELEASED set, its next pointer can't be
         * dereferenced; however, as each css is given a monotonically
@@ -4382,7 +4382,7 @@ static struct css_set *css_task_iter_next_css_set(struct css_task_iter *it)
 }
 
 /**
- * css_task_iter_advance_css_set - advance a task itererator to the next css_set
+ * css_task_iter_advance_css_set - advance a task iterator to the next css_set
  * @it: the iterator to advance
  *
  * Advance @it to the next css_set to walk.
@@ -6308,7 +6308,7 @@ struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
  *
  * Find the cgroup at @path on the default hierarchy, increment its
  * reference count and return it.  Returns pointer to the found cgroup on
- * success, ERR_PTR(-ENOENT) if @path doens't exist and ERR_PTR(-ENOTDIR)
+ * success, ERR_PTR(-ENOENT) if @path doesn't exist and ERR_PTR(-ENOTDIR)
  * if @path points to a non-directory.
  */
 struct cgroup *cgroup_get_from_path(const char *path)
index 53d688bdd894cb51aced0f4981dc562d9b6fe7e7..eb0029c9a6a63c2c9f4b32f173b8b5b1e0cb201c 100644 (file)
@@ -81,7 +81,6 @@ CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_TABLET=y
 CONFIG_INPUT_UINPUT=y
-CONFIG_ION=y
 CONFIG_JOYSTICK_XPAD=y
 CONFIG_JOYSTICK_XPAD_FF=y
 CONFIG_JOYSTICK_XPAD_LEDS=y
index 3594291a854286545de8c33048bb57396871b083..04029e35e69af40942815b96e39ebb576f8394c3 100644 (file)
@@ -63,6 +63,7 @@
 #include <linux/random.h>
 #include <linux/rcuwait.h>
 #include <linux/compat.h>
+#include <linux/io_uring.h>
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
@@ -776,6 +777,7 @@ void __noreturn do_exit(long code)
                schedule();
        }
 
+       io_uring_files_cancel(tsk->files);
        exit_signals(tsk);  /* sets PF_EXITING */
 
        /* sync mm's RSS info before statistics gathering */
index 35bdcfd84d42827dc95cdc57e75994ed1fe483d9..36607551f96652048ada29dfd8c70bd0482da55a 100644 (file)
@@ -241,7 +241,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
        }
 }
 
-/* Spawn RCU-tasks grace-period kthread, e.g., at core_initcall() time. */
+/* Spawn RCU-tasks grace-period kthread. */
 static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
 {
        struct task_struct *t;
@@ -564,7 +564,6 @@ static int __init rcu_spawn_tasks_kthread(void)
        rcu_spawn_tasks_kthread_generic(&rcu_tasks);
        return 0;
 }
-core_initcall(rcu_spawn_tasks_kthread);
 
 #if !defined(CONFIG_TINY_RCU)
 void show_rcu_tasks_classic_gp_kthread(void)
@@ -692,7 +691,6 @@ static int __init rcu_spawn_tasks_rude_kthread(void)
        rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
        return 0;
 }
-core_initcall(rcu_spawn_tasks_rude_kthread);
 
 #if !defined(CONFIG_TINY_RCU)
 void show_rcu_tasks_rude_gp_kthread(void)
@@ -968,6 +966,11 @@ static void rcu_tasks_trace_pregp_step(void)
 static void rcu_tasks_trace_pertask(struct task_struct *t,
                                    struct list_head *hop)
 {
+       // During early boot when there is only the one boot CPU, there
+       // is no idle task for the other CPUs. Just return.
+       if (unlikely(t == NULL))
+               return;
+
        WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
        WRITE_ONCE(t->trc_reader_checked, false);
        t->trc_ipi_to_cpu = -1;
@@ -1193,7 +1196,6 @@ static int __init rcu_spawn_tasks_trace_kthread(void)
        rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
        return 0;
 }
-core_initcall(rcu_spawn_tasks_trace_kthread);
 
 #if !defined(CONFIG_TINY_RCU)
 void show_rcu_tasks_trace_gp_kthread(void)
@@ -1222,6 +1224,21 @@ void show_rcu_tasks_gp_kthreads(void)
 }
 #endif /* #ifndef CONFIG_TINY_RCU */
 
+void __init rcu_init_tasks_generic(void)
+{
+#ifdef CONFIG_TASKS_RCU
+       rcu_spawn_tasks_kthread();
+#endif
+
+#ifdef CONFIG_TASKS_RUDE_RCU
+       rcu_spawn_tasks_rude_kthread();
+#endif
+
+#ifdef CONFIG_TASKS_TRACE_RCU
+       rcu_spawn_tasks_trace_kthread();
+#endif
+}
+
 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
 static inline void rcu_tasks_bootup_oddness(void) {}
 void show_rcu_tasks_gp_kthreads(void) {}
index 5736c55aaa1afc60d21294c79a0c8ad5d70efc09..6b9c431da08fe92d7d9c13866b4ddb2d767385b8 100644 (file)
@@ -2550,6 +2550,9 @@ bool get_signal(struct ksignal *ksig)
        struct signal_struct *signal = current->signal;
        int signr;
 
+       if (unlikely(current->task_works))
+               task_work_run();
+
        /*
         * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
         * that the arch handlers don't all have to do it. If we get here
index d5a19413d4f8a3945cdb53cd64c2fd887773730d..c1a62ae7e8128f8a0b89ac5c6faa0756a4665907 100644 (file)
@@ -538,7 +538,7 @@ config KPROBE_EVENTS
 config KPROBE_EVENTS_ON_NOTRACE
        bool "Do NOT protect notrace function from kprobe events"
        depends on KPROBE_EVENTS
-       depends on KPROBES_ON_FTRACE
+       depends on DYNAMIC_FTRACE
        default n
        help
          This is only for the developers who want to debug ftrace itself
index 9c31f42245e93b6ae6386da8370274f61f1de173..e6fba1798771b401eaf97d08b2c4d5926eb81995 100644 (file)
@@ -434,7 +434,7 @@ static int disable_trace_kprobe(struct trace_event_call *call,
        return 0;
 }
 
-#if defined(CONFIG_KPROBES_ON_FTRACE) && \
+#if defined(CONFIG_DYNAMIC_FTRACE) && \
        !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
 static bool __within_notrace_func(unsigned long addr)
 {
index b5295a0b05369c9927b01afb1fbcf88e220777e7..9880b6c0e2721fe5c0758eda58d82c1f36576d19 100644 (file)
@@ -3731,17 +3731,24 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
         * is updated and visible.
         */
        if (!freezable || !workqueue_freezing) {
+               bool kick = false;
+
                pwq->max_active = wq->saved_max_active;
 
                while (!list_empty(&pwq->delayed_works) &&
-                      pwq->nr_active < pwq->max_active)
+                      pwq->nr_active < pwq->max_active) {
                        pwq_activate_first_delayed(pwq);
+                       kick = true;
+               }
 
                /*
                 * Need to kick a worker after thawed or an unbound wq's
-                * max_active is bumped.  It's a slow path.  Do it always.
+                * max_active is bumped. In realtime scenarios, always kicking a
+                * worker will cause interference on the isolated cpu cores, so
+                * let's kick iff work items were activated.
                 */
-               wake_up_worker(pwq->pool);
+               if (kick)
+                       wake_up_worker(pwq->pool);
        } else {
                pwq->max_active = 0;
        }
index e6e58b26e8881bb860b1f6c223a7094b93575c21..7937265ef8797b68047218a60631dd5db5646333 100644 (file)
@@ -295,14 +295,6 @@ config GDB_SCRIPTS
 
 endif # DEBUG_INFO
 
-config ENABLE_MUST_CHECK
-       bool "Enable __must_check logic"
-       default y
-       help
-         Enable the __must_check logic in the kernel build.  Disable this to
-         suppress the "warning: ignoring return value of 'foo', declared with
-         attribute warn_unused_result" messages.
-
 config FRAME_WARN
        int "Warn for stack frames larger than"
        range 0 8192
index 1955d624177cffb90ad155aa1dbec3807eee1505..5baedc573dd6b8d3831a34011c70a156e5581ee3 100644 (file)
@@ -774,8 +774,8 @@ static const struct font_data fontdata_ter16x32 = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc,
        0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 95 */
-       0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00,
-       0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+       0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -1169,7 +1169,7 @@ static const struct font_data fontdata_ter16x32 = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x7f, 0xf8, 0x7f, 0xfc, 0x03, 0x9e, 0x03, 0x8e,
+       0x7e, 0xf8, 0x7f, 0xfc, 0x03, 0x9e, 0x03, 0x8e,
        0x03, 0x8e, 0x3f, 0x8e, 0x7f, 0xfe, 0xf3, 0xfe,
        0xe3, 0x80, 0xe3, 0x80, 0xe3, 0x80, 0xf3, 0xce,
        0x7f, 0xfe, 0x3e, 0xfc, 0x00, 0x00, 0x00, 0x00,
index 7f1244b5294a8a73b534e33aca40d993b7bcbfd0..dab97bb69df63e70beac7f956fdf239b963cd7bd 100644 (file)
@@ -81,14 +81,14 @@ static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
  * users set the same bit, one user will return remain bits, otherwise
  * return 0.
  */
-static int bitmap_set_ll(unsigned long *map, int start, int nr)
+static int bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
 {
        unsigned long *p = map + BIT_WORD(start);
-       const int size = start + nr;
+       const unsigned long size = start + nr;
        int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
 
-       while (nr - bits_to_set >= 0) {
+       while (nr >= bits_to_set) {
                if (set_bits_ll(p, mask_to_set))
                        return nr;
                nr -= bits_to_set;
@@ -116,14 +116,15 @@ static int bitmap_set_ll(unsigned long *map, int start, int nr)
  * users clear the same bit, one user will return remain bits,
  * otherwise return 0.
  */
-static int bitmap_clear_ll(unsigned long *map, int start, int nr)
+static unsigned long
+bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
 {
        unsigned long *p = map + BIT_WORD(start);
-       const int size = start + nr;
+       const unsigned long size = start + nr;
        int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
 
-       while (nr - bits_to_clear >= 0) {
+       while (nr >= bits_to_clear) {
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
                nr -= bits_to_clear;
@@ -183,8 +184,8 @@ int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t ph
                 size_t size, int nid, void *owner)
 {
        struct gen_pool_chunk *chunk;
-       int nbits = size >> pool->min_alloc_order;
-       int nbytes = sizeof(struct gen_pool_chunk) +
+       unsigned long nbits = size >> pool->min_alloc_order;
+       unsigned long nbytes = sizeof(struct gen_pool_chunk) +
                                BITS_TO_LONGS(nbits) * sizeof(long);
 
        chunk = vzalloc_node(nbytes, nid);
@@ -242,7 +243,7 @@ void gen_pool_destroy(struct gen_pool *pool)
        struct list_head *_chunk, *_next_chunk;
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
-       int bit, end_bit;
+       unsigned long bit, end_bit;
 
        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
@@ -278,7 +279,7 @@ unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
        struct gen_pool_chunk *chunk;
        unsigned long addr = 0;
        int order = pool->min_alloc_order;
-       int nbits, start_bit, end_bit, remain;
+       unsigned long nbits, start_bit, end_bit, remain;
 
 #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
@@ -487,7 +488,7 @@ void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
 {
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
-       int start_bit, nbits, remain;
+       unsigned long start_bit, nbits, remain;
 
 #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
@@ -755,7 +756,7 @@ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
        index = bitmap_find_next_zero_area(map, size, start, nr, 0);
 
        while (index < size) {
-               int next_bit = find_next_bit(map, size, index + nr);
+               unsigned long next_bit = find_next_bit(map, size, index + nr);
                if ((next_bit - index) < len) {
                        len = next_bit - index;
                        start_bit = index;
index 1635111c5bd2af92ae6683b97040ebf941266a28..a21e6a5792c5a53fcdb3ffd29328d9d5f6d5afad 100644 (file)
@@ -1658,7 +1658,7 @@ static int copy_compat_iovec_from_user(struct iovec *iov,
                (const struct compat_iovec __user *)uvec;
        int ret = -EFAULT, i;
 
-       if (!user_access_begin(uvec, nr_segs * sizeof(*uvec)))
+       if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
                return -EFAULT;
 
        for (i = 0; i < nr_segs; i++) {
index b4c0df6d706dcb5c8026abd07b894d008cad59c6..c770570bfe4f2d939a1331657f45a43edade9467 100644 (file)
@@ -48,7 +48,7 @@ endif
 endif
 
 quiet_cmd_unroll = UNROLL  $@
-      cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$* < $< > $@
+      cmd_unroll = $(AWK) -v N=$* -f $(srctree)/$(src)/unroll.awk < $< > $@
 
 targets += int1.c int2.c int4.c int8.c int16.c int32.c
 $(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE
index 8e4d5afbbb1094c337c0a0b95b815f3f83234efe..66e1c96387c407c1ab1745c97d51a46bf33b94ee 100644 (file)
@@ -8,4 +8,4 @@
 
 obj-$(CONFIG_ZLIB_DFLTCC) += zlib_dfltcc.o
 
-zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o dfltcc_syms.o
+zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o
index c30de430b30ca64ae30d3299949e59ddb9520604..782f76e9d4dab1d339748c58599cba3a9378058b 100644 (file)
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: Zlib
 /* dfltcc.c - SystemZ DEFLATE CONVERSION CALL support. */
 
-#include <linux/zutil.h>
+#include <linux/export.h>
+#include <linux/module.h>
 #include "dfltcc_util.h"
 #include "dfltcc.h"
 
@@ -53,3 +54,6 @@ void dfltcc_reset(
     dfltcc_state->dht_threshold = DFLTCC_DHT_MIN_SAMPLE_SIZE;
     dfltcc_state->param.ribm = DFLTCC_RIBM;
 }
+EXPORT_SYMBOL(dfltcc_reset);
+
+MODULE_LICENSE("GPL");
index 00c185101c6d14a145d16b54b739f8b8caa2d830..6c946e8532eec6deb8a744dda760153ac7a1ca04 100644 (file)
@@ -4,6 +4,7 @@
 #include "dfltcc_util.h"
 #include "dfltcc.h"
 #include <asm/setup.h>
+#include <linux/export.h>
 #include <linux/zutil.h>
 
 /*
@@ -34,6 +35,7 @@ int dfltcc_can_deflate(
 
     return 1;
 }
+EXPORT_SYMBOL(dfltcc_can_deflate);
 
 static void dfltcc_gdht(
     z_streamp strm
@@ -277,3 +279,4 @@ again:
         goto again; /* deflate() must use all input or all output */
     return 1;
 }
+EXPORT_SYMBOL(dfltcc_deflate);
index db107016d29b32952efa5def52873176292ec720..fb60b5a6a1cb678acb2111cb2331a3a7e3f80a0d 100644 (file)
@@ -125,7 +125,7 @@ dfltcc_inflate_action dfltcc_inflate(
     param->ho = (state->write - state->whave) & ((1 << HB_BITS) - 1);
     if (param->hl)
         param->nt = 0; /* Honor history for the first block */
-    param->cv = state->flags ? REVERSE(state->check) : state->check;
+    param->cv = state->check;
 
     /* Inflate */
     do {
@@ -138,7 +138,7 @@ dfltcc_inflate_action dfltcc_inflate(
     state->bits = param->sbb;
     state->whave = param->hl;
     state->write = (param->ho + param->hl) & ((1 << HB_BITS) - 1);
-    state->check = state->flags ? REVERSE(param->cv) : param->cv;
+    state->check = param->cv;
     if (cc == DFLTCC_CC_OP2_CORRUPT && param->oesc != 0) {
         /* Report an error if stream is corrupted */
         state->mode = BAD;
diff --git a/lib/zlib_dfltcc/dfltcc_syms.c b/lib/zlib_dfltcc/dfltcc_syms.c
deleted file mode 100644 (file)
index 6f23481..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/lib/zlib_dfltcc/dfltcc_syms.c
- *
- * Exported symbols for the s390 zlib dfltcc support.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/zlib.h>
-#include "dfltcc.h"
-
-EXPORT_SYMBOL(dfltcc_can_deflate);
-EXPORT_SYMBOL(dfltcc_deflate);
-EXPORT_SYMBOL(dfltcc_reset);
-MODULE_LICENSE("GPL");
index cbf32d2824fd473d309c157c514dfff38783be9b..18f6ee3179002a7f218dfafb4e7724685ff1c215 100644 (file)
@@ -4105,10 +4105,30 @@ retry_avoidcopy:
                 * may get SIGKILLed if it later faults.
                 */
                if (outside_reserve) {
+                       struct address_space *mapping = vma->vm_file->f_mapping;
+                       pgoff_t idx;
+                       u32 hash;
+
                        put_page(old_page);
                        BUG_ON(huge_pte_none(pte));
+                       /*
+                        * Drop hugetlb_fault_mutex and i_mmap_rwsem before
+                        * unmapping; the unmap needs to hold i_mmap_rwsem
+                        * in write mode.  Dropping i_mmap_rwsem in read mode
+                        * here is OK as COW mappings do not interact with
+                        * PMD sharing.
+                        *
+                        * Reacquire both after unmap operation.
+                        */
+                       idx = vma_hugecache_offset(h, vma, haddr);
+                       hash = hugetlb_fault_mutex_hash(mapping, idx);
+                       mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+                       i_mmap_unlock_read(mapping);
+
                        unmap_ref_private(mm, vma, old_page, haddr);
-                       BUG_ON(huge_pte_none(pte));
+
+                       i_mmap_lock_read(mapping);
+                       mutex_lock(&hugetlb_fault_mutex_table[hash]);
                        spin_lock(ptl);
                        ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
                        if (likely(ptep &&
@@ -4351,7 +4371,7 @@ retry:
                 * So we need to block hugepage fault by PG_hwpoison bit check.
                 */
                if (unlikely(PageHWPoison(page))) {
-                       ret = VM_FAULT_HWPOISON |
+                       ret = VM_FAULT_HWPOISON_LARGE |
                                VM_FAULT_SET_HINDEX(hstate_index(h));
                        goto backout_unlocked;
                }
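
The hugetlb change follows a common locking pattern: unmap_ref_private() needs i_mmap_rwsem in write mode, so the fault path must drop its own read-mode hold (and the fault mutex) first, then retake both and revalidate the PTE, which may have changed in the window. A pthreads sketch of that drop-reacquire-revalidate shape (plain POSIX primitives standing in for the kernel's locks; all names illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t fault_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t mmap_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static bool pte_still_valid = true; /* stand-in for the huge_pte recheck */

static void unmap_needs_write_lock(void)
{
	pthread_rwlock_wrlock(&mmap_rwsem);
	/* ... unmap work that requires exclusive access ... */
	pthread_rwlock_unlock(&mmap_rwsem);
}

int main(void)
{
	pthread_rwlock_rdlock(&mmap_rwsem);
	pthread_mutex_lock(&fault_mutex);

	/* Drop both before the op that takes the rwlock in write mode. */
	pthread_mutex_unlock(&fault_mutex);
	pthread_rwlock_unlock(&mmap_rwsem);

	unmap_needs_write_lock();

	/* Reacquire in the original order, then revalidate state that
	 * may have changed while the locks were dropped. */
	pthread_rwlock_rdlock(&mmap_rwsem);
	pthread_mutex_lock(&fault_mutex);
	printf("pte still valid after retaking locks: %d\n", pte_still_valid);

	pthread_mutex_unlock(&fault_mutex);
	pthread_rwlock_unlock(&mmap_rwsem);
	return 0;
}
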
index 1dd5a0f9937261fa3ccfbcb5ac8125986838ce34..5106b84b07d4353a3d44618d49f30f544667af5c 100644 (file)
@@ -337,6 +337,8 @@ void kasan_record_aux_stack(void *addr)
        cache = page->slab_cache;
        object = nearest_obj(cache, page, addr);
        alloc_meta = kasan_get_alloc_meta(cache, object);
+       if (!alloc_meta)
+               return;
 
        alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
        alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);
index bc0ad208b3a7a3751f00c46f3365fab7528fe949..7ca0b92d5886dca033e6b2a050441fd01b71c048 100644 (file)
@@ -64,7 +64,8 @@ static inline bool kasan_pmd_table(pud_t pud)
        return false;
 }
 #endif
-pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss;
+pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+       __page_aligned_bss;
 
 static inline bool kasan_pte_table(pmd_t pmd)
 {
index 5a38e9eade946b66b98730ec57ff80efeec93004..04d9f154a130d28f38d26dfb0a897656d747cd71 100644 (file)
@@ -1940,7 +1940,7 @@ retry:
                        goto retry;
                }
        } else if (ret == -EIO) {
-               pr_info("%s: %#lx: unknown page type: %lx (%pGP)\n",
+               pr_info("%s: %#lx: unknown page type: %lx (%pGp)\n",
                         __func__, pfn, page->flags, &page->flags);
        }
 
index 7d608765932b99e95aa5ba49e88328412084b1c4..feff48e1465a6ee652d130560617e2425109ee2f 100644 (file)
@@ -2892,11 +2892,13 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                entry = mk_pte(new_page, vma->vm_page_prot);
                entry = pte_sw_mkyoung(entry);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+
                /*
                 * Clear the pte entry and flush it first, before updating the
-                * pte with the new entry. This will avoid a race condition
-                * seen in the presence of one thread doing SMC and another
-                * thread doing COW.
+                * pte with the new entry, to keep TLBs on different CPUs in
+                * sync. This code used to set the new PTE then flush TLBs, but
+                * that left a window where the new PTE could be loaded into
+                * some TLBs while the old PTE remains in others.
                 */
                ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
                page_add_new_anon_rmap(new_page, vma, vmf->address, false);
index af41fb99082004bcfcdfbf4f2c5eb0445705e188..f9d57b9be8c71d5b35a17bd3752a8c7c070b674d 100644 (file)
@@ -713,7 +713,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
         * expects the zone spans the pfn range. All the pages in the range
         * are reserved, so nobody should be touching them and we should be safe
         */
-       memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
+       memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, 0,
                         MEMINIT_HOTPLUG, altmap, migratetype);
 
        set_zone_contiguous(zone);
index 8cf96bd21341cf30e186c5bb1f893ab267149860..2c3a8650205343c54e451b98e5d7022c2c4b7109 100644 (file)
@@ -1111,7 +1111,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags)
 {
        int busy = 0;
-       int err;
+       int err = 0;
        nodemask_t tmp;
 
        migrate_prep();
index c5590afe71650a21b98b33f748d97dd99850f30a..f554320281ccd830fd171c04d33e970e158d30fa 100644 (file)
@@ -358,7 +358,9 @@ static unsigned long get_extent(enum pgt_entry entry, unsigned long old_addr,
 
        next = (old_addr + size) & mask;
        /* even if next overflowed, extent below will be ok */
-       extent = (next > old_end) ? old_end - old_addr : next - old_addr;
+       extent = next - old_addr;
+       if (extent > old_end - old_addr)
+               extent = old_end - old_addr;
        next = (new_addr + size) & mask;
        if (extent > next - new_addr)
                extent = next - new_addr;
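
The mremap() fix is an overflow-safe clamp. As the retained comment notes, next = (old_addr + size) & mask can wrap past zero; in the old ternary, next > old_end is then false and the extent overshoots the end of the source range. Computing the extent first and clamping it against what actually remains stays correct even after the wrap. A sketch with illustrative 64-bit values:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	const uint64_t size = 0x200000;            /* PMD-sized step */
	const uint64_t mask = ~(size - 1);
	uint64_t old_addr = 0xFFFFFFFFFFE00000ull; /* near the top of VA */
	uint64_t old_end  = old_addr + 0x100000;   /* 1 MiB actually left */
	uint64_t next = (old_addr + size) & mask;  /* wraps to 0 */

	/* Old form: after the wrap, next > old_end is false, so the
	 * computed extent overshoots the end of the source range. */
	uint64_t bad = (next > old_end) ? old_end - old_addr
					: next - old_addr;

	/* Fixed form: compute first, then clamp to what remains. */
	uint64_t extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;

	printf("old form: %#" PRIx64 ", clamped: %#" PRIx64 "\n", bad, extent);
	return 0;
}
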
index 586042472ac9010e757d39e86d3283f55202951b..eb34d204d4ee717f0cc3e8c640df1a4c10f1029a 100644 (file)
@@ -2826,7 +2826,7 @@ EXPORT_SYMBOL(__test_set_page_writeback);
  */
 void wait_on_page_writeback(struct page *page)
 {
-       if (PageWriteback(page)) {
+       while (PageWriteback(page)) {
                trace_wait_on_page_writeback(page, page_mapping(page));
                wait_on_page_bit(page, PG_writeback);
        }
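
Turning the "if" into a "while" in wait_on_page_writeback() is the classic recheck-after-wakeup rule: a waiter can be woken, lose the race to a new writeback cycle starting on the same page, and must test the condition again before proceeding. The same shape in a self-contained pthreads sketch (a condition variable and flag standing in for the page bit):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool writeback = false;

static void wait_for_writeback_end(void)
{
	pthread_mutex_lock(&lock);
	/* 'while', not 'if': the flag may have been set again between
	 * the wakeup and this thread actually running. */
	while (writeback)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	wait_for_writeback_end(); /* returns immediately: flag is false */
	return 0;
}
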
index 7a2c89b21115011e738823181fccfffe9e321158..027f6481ba59bca36ff024dbbf62cfa3f7ef04e9 100644 (file)
@@ -423,6 +423,8 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
        if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
                return false;
 
+       if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
+               return true;
        /*
         * We start only with one section of pages, more pages are added as
         * needed until the rest of deferred pages are initialized.
@@ -2860,20 +2862,20 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 {
        struct page *page;
 
-#ifdef CONFIG_CMA
-       /*
-        * Balance movable allocations between regular and CMA areas by
-        * allocating from CMA when over half of the zone's free memory
-        * is in the CMA area.
-        */
-       if (alloc_flags & ALLOC_CMA &&
-           zone_page_state(zone, NR_FREE_CMA_PAGES) >
-           zone_page_state(zone, NR_FREE_PAGES) / 2) {
-               page = __rmqueue_cma_fallback(zone, order);
-               if (page)
-                       return page;
+       if (IS_ENABLED(CONFIG_CMA)) {
+               /*
+                * Balance movable allocations between regular and CMA areas by
+                * allocating from CMA when over half of the zone's free memory
+                * is in the CMA area.
+                */
+               if (alloc_flags & ALLOC_CMA &&
+                   zone_page_state(zone, NR_FREE_CMA_PAGES) >
+                   zone_page_state(zone, NR_FREE_PAGES) / 2) {
+                       page = __rmqueue_cma_fallback(zone, order);
+                       if (page)
+                               goto out;
+               }
        }
-#endif
 retry:
        page = __rmqueue_smallest(zone, order, migratetype);
        if (unlikely(!page)) {
@@ -2884,8 +2886,9 @@ retry:
                                                                alloc_flags))
                        goto retry;
        }
-
-       trace_mm_page_alloc_zone_locked(page, order, migratetype);
+out:
+       if (page)
+               trace_mm_page_alloc_zone_locked(page, order, migratetype);
        return page;
 }
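
Replacing #ifdef CONFIG_CMA with if (IS_ENABLED(CONFIG_CMA)) keeps the CMA branch visible to the compiler in every configuration, so it is always parsed and type-checked, while dead-code elimination still drops it when the option is off. The preprocessor trick behind IS_ENABLED() is worth seeing in isolation; this is a cut-down, self-contained version of the include/linux/kconfig.h machinery:

#include <stdio.h>

/* Config options are either defined to 1 or not defined at all; the
 * macros below turn that into a compile-time 0/1 constant expression. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option) __is_defined(option)

#define CONFIG_CMA 1

int main(void)
{
	/* Both branches are compiled; only one survives optimization. */
	if (IS_ENABLED(CONFIG_CMA))
		printf("CMA path compiled in and taken\n");
	else
		printf("CMA path compiled but eliminated as dead code\n");
	return 0;
}
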
 
@@ -6116,7 +6119,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
  * zone stats (e.g., nr_isolate_pageblock) are touched.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-               unsigned long start_pfn,
+               unsigned long start_pfn, unsigned long zone_end_pfn,
                enum meminit_context context,
                struct vmem_altmap *altmap, int migratetype)
 {
@@ -6152,7 +6155,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                if (context == MEMINIT_EARLY) {
                        if (overlap_memmap_init(zone, &pfn))
                                continue;
-                       if (defer_init(nid, pfn, end_pfn))
+                       if (defer_init(nid, pfn, zone_end_pfn))
                                break;
                }
 
@@ -6266,7 +6269,7 @@ void __meminit __weak memmap_init(unsigned long size, int nid,
 
                if (end_pfn > start_pfn) {
                        size = end_pfn - start_pfn;
-                       memmap_init_zone(size, nid, zone, start_pfn,
+                       memmap_init_zone(size, nid, zone, start_pfn, range_end_pfn,
                                         MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
                }
        }
index 4bcc119580890753d27d25807a18ab58b520b002..f5fee9cf90f8bb35d81a1424947c9299b09959a0 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mm.h>
 #include <linux/uio.h>
 #include <linux/sched.h>
+#include <linux/compat.h>
 #include <linux/sched/mm.h>
 #include <linux/highmem.h>
 #include <linux/ptrace.h>
index 0c8b43a5b3b0339820c891cb9cde893387e03b13..d9e4e10683cc129c33c67864e75c926a5a7bf9d6 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1619,9 +1619,6 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
        else
                page = __alloc_pages_node(node, flags, order);
 
-       if (page)
-               account_slab_page(page, order, s);
-
        return page;
 }
 
@@ -1774,6 +1771,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
        page->objects = oo_objects(oo);
 
+       account_slab_page(page, oo_order(oo), s);
+
        page->slab_cache = s;
        __SetPageSlab(page);
        if (page_is_pfmemalloc(page))
@@ -1974,7 +1973,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 
                t = acquire_slab(s, n, page, object == NULL, &objects);
                if (!t)
-                       break;
+                       continue; /* cmpxchg raced */
 
                available += objects;
                if (!object) {
index 4d88fe5a277ac2d0e520f37f4dc7ec0a779fc9fe..e6f352bf0498248a5f36c991f56c80ac9cf290c3 100644 (file)
@@ -2420,8 +2420,10 @@ void *vmap(struct page **pages, unsigned int count,
                return NULL;
        }
 
-       if (flags & VM_MAP_PUT_PAGES)
+       if (flags & VM_MAP_PUT_PAGES) {
                area->pages = pages;
+               area->nr_pages = count;
+       }
        return area->addr;
 }
 EXPORT_SYMBOL(vmap);
index 257cba79a96dd024251478235b237f60b048cb70..b1b574ad199d2ca8bde196e73de0a129e8a61e43 100644 (file)
@@ -1238,6 +1238,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
                        if (!PageSwapCache(page)) {
                                if (!(sc->gfp_mask & __GFP_IO))
                                        goto keep_locked;
+                               if (page_maybe_dma_pinned(page))
+                                       goto keep_locked;
                                if (PageTransHuge(page)) {
                                        /* cannot split THP, skip it */
                                        if (!can_split_huge_page(page, NULL))
index f292e0267bb9ea30a4b38e60b039d99c6be2d364..8b644113715e9db49effcaa60cfe45572304bb13 100644 (file)
@@ -284,8 +284,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
        return 0;
 
 out_free_newdev:
-       if (new_dev->reg_state == NETREG_UNINITIALIZED)
-               free_netdev(new_dev);
+       free_netdev(new_dev);
        return err;
 }
 
index c1c30a9f76f343e90570102e6eb9d8407ae563ea..8b796c499cbb243f7cfa9552d02703602316fd70 100644 (file)
@@ -272,7 +272,8 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
            kattr->test.repeat)
                return -EINVAL;
 
-       if (ctx_size_in < prog->aux->max_ctx_offset)
+       if (ctx_size_in < prog->aux->max_ctx_offset ||
+           ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
                return -EINVAL;
 
        if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
index 7839c3b9e5bea29e204461b58fcd868cef303c3a..3ef7f78e553bc93e2c24db9b5b8a016cc0235f28 100644 (file)
@@ -1155,6 +1155,7 @@ static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
        if (peer)
                return -EOPNOTSUPP;
 
+       memset(addr, 0, sizeof(*addr));
        addr->can_family = AF_CAN;
        addr->can_ifindex = so->ifindex;
        addr->can_addr.tp.rx_id = so->rxid;
index c1ebb2aa08b5cb604a085d06198767442ef6f28d..c38d8de93836371f0beb51bee80c1ef9b81485f2 100644 (file)
@@ -1333,7 +1333,8 @@ static int prepare_auth_signature(struct ceph_connection *con)
        void *buf;
        int ret;
 
-       buf = alloc_conn_buf(con, head_onwire_len(SHA256_DIGEST_SIZE, false));
+       buf = alloc_conn_buf(con, head_onwire_len(SHA256_DIGEST_SIZE,
+                                                 con_secure(con)));
        if (!buf)
                return -ENOMEM;
 
@@ -2032,10 +2033,18 @@ bad:
        return -EINVAL;
 }
 
+/*
+ * Align session_key and con_secret to avoid GFP_ATOMIC allocation
+ * inside crypto_shash_setkey() and crypto_aead_setkey() called from
+ * setup_crypto().  __aligned(16) isn't guaranteed to work for stack
+ * objects, so do it by hand.
+ */
 static int process_auth_done(struct ceph_connection *con, void *p, void *end)
 {
-       u8 session_key[CEPH_KEY_LEN];
-       u8 con_secret[CEPH_MAX_CON_SECRET_LEN];
+       u8 session_key_buf[CEPH_KEY_LEN + 16];
+       u8 con_secret_buf[CEPH_MAX_CON_SECRET_LEN + 16];
+       u8 *session_key = PTR_ALIGN(&session_key_buf[0], 16);
+       u8 *con_secret = PTR_ALIGN(&con_secret_buf[0], 16);
        int session_key_len, con_secret_len;
        int payload_len;
        u64 global_id;
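
The ceph fix over-allocates each stack buffer by 16 bytes and aligns the working pointer by hand, because, as the new comment says, __aligned(16) is not reliably honored for stack objects. A standalone sketch of the same over-allocate-and-align idiom, with a PTR_ALIGN() equivalent re-created locally (the kernel's own PTR_ALIGN lives in include/linux/align.h):

#include <stdint.h>
#include <stdio.h>

/* Round a pointer up to the next multiple of a (a power of two). */
#define PTR_ALIGN(p, a) \
	((void *)(((uintptr_t)(p) + ((a) - 1)) & ~(uintptr_t)((a) - 1)))

int main(void)
{
	/* Over-allocate by the alignment so the aligned pointer still
	 * has the full requested size available behind it. */
	unsigned char key_buf[32 + 16];
	unsigned char *key = PTR_ALIGN(&key_buf[0], 16);

	printf("buf=%p aligned=%p (mod 16 = %lu)\n",
	       (void *)key_buf, (void *)key,
	       (unsigned long)((uintptr_t)key % 16));
	return 0;
}
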
index 8fa739259041aaa03585b5a7b8ebce862f4b7d1d..a979b86dbacda9dfe31dd8b269024f7f0f5a8ef1 100644 (file)
@@ -9661,9 +9661,20 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
                }
        }
 
-       if ((features & NETIF_F_HW_TLS_TX) && !(features & NETIF_F_HW_CSUM)) {
-               netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
-               features &= ~NETIF_F_HW_TLS_TX;
+       if (features & NETIF_F_HW_TLS_TX) {
+               bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
+                       (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+               bool hw_csum = features & NETIF_F_HW_CSUM;
+
+               if (!ip_csum && !hw_csum) {
+                       netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
+                       features &= ~NETIF_F_HW_TLS_TX;
+               }
+       }
+
+       if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
+               netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
+               features &= ~NETIF_F_HW_TLS_RX;
        }
 
        return features;
@@ -10077,17 +10088,11 @@ int register_netdevice(struct net_device *dev)
        ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
        ret = notifier_to_errno(ret);
        if (ret) {
+               /* Expect explicit free_netdev() on failure */
+               dev->needs_free_netdev = false;
                rollback_registered(dev);
-               rcu_barrier();
-
-               dev->reg_state = NETREG_UNREGISTERED;
-               /* We should put the kobject that hold in
-                * netdev_unregister_kobject(), otherwise
-                * the net device cannot be freed when
-                * driver calls free_netdev(), because the
-                * kobject is being hold.
-                */
-               kobject_put(&dev->dev.kobj);
+               net_set_todo(dev);
+               goto out;
        }
        /*
         *      Prevent userspace races by waiting until the network
@@ -10631,6 +10636,17 @@ void free_netdev(struct net_device *dev)
        struct napi_struct *p, *n;
 
        might_sleep();
+
+       /* When called immediately after register_netdevice() failed, the unwind
+        * handling may still be dismantling the device. Handle that case by
+        * deferring the free.
+        */
+       if (dev->reg_state == NETREG_UNREGISTERING) {
+               ASSERT_RTNL();
+               dev->needs_free_netdev = true;
+               return;
+       }
+
        netif_free_tx_queues(dev);
        netif_free_rx_queues(dev);
 
index ee828e4b1007e9809e2495a759b869dafb8daa14..738d4344d6799a0dda3051e265da84fcabc0d86c 100644 (file)
@@ -4146,7 +4146,7 @@ out:
 static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
                                              struct genl_info *info)
 {
-       struct devlink_port *devlink_port = info->user_ptr[0];
+       struct devlink_port *devlink_port = info->user_ptr[1];
        struct devlink_param_item *param_item;
        struct sk_buff *msg;
        int err;
@@ -4175,7 +4175,7 @@ static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
 static int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
                                              struct genl_info *info)
 {
-       struct devlink_port *devlink_port = info->user_ptr[0];
+       struct devlink_port *devlink_port = info->user_ptr[1];
 
        return __devlink_nl_cmd_param_set_doit(devlink_port->devlink,
                                               devlink_port->index,
index 80dbf2f4016e26824bc968115503ca2072933f63..8e582e29a41e39809cc534865bb3c91c05b3d9f2 100644 (file)
@@ -80,11 +80,11 @@ static void est_timer(struct timer_list *t)
        u64 rate, brate;
 
        est_fetch_counters(est, &b);
-       brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
-       brate -= (est->avbps >> est->ewma_log);
+       brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log);
+       brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);
 
-       rate = (b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
-       rate -= (est->avpps >> est->ewma_log);
+       rate = (b.packets - est->last_packets) << (10 - est->intvl_log);
+       rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);
 
        write_seqcount_begin(&est->seq);
        est->avbps += brate;
@@ -143,6 +143,9 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
        if (parm->interval < -2 || parm->interval > 3)
                return -EINVAL;
 
+       if (parm->ewma_log == 0 || parm->ewma_log >= 31)
+               return -EINVAL;
+
        est = kzalloc(sizeof(*est), GFP_KERNEL);
        if (!est)
                return -ENOBUFS;
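
The estimator fix reorders the EWMA update so each term is shifted down by ewma_log before the subtraction, rather than folding ewma_log into the left shift, where 10 - ewma_log - intvl_log could go negative and the intermediate value could overflow; the added ewma_log bounds check rejects the degenerate parameters outright. The shift-first update in isolation, as a small sketch:

#include <stdio.h>

int main(void)
{
	/* Shift-first EWMA with weight 1/2^ewma_log, the form the fix
	 * switches to: avg += (sample >> log) - (avg >> log).  Each
	 * term is scaled down before the subtraction, so no
	 * intermediate needs more headroom than the inputs. */
	const unsigned int ewma_log = 3;
	unsigned long long avg = 0, sample = 1000;

	for (int i = 0; i < 64; i++)
		avg += (sample >> ewma_log) - (avg >> ewma_log);

	/* avg approaches the sample value (here ~1000). */
	printf("avg converged to %llu\n", avg);
	return 0;
}
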
index 9500d28a43b0e1a390382912b6fb59db935e727b..277ed854aef1c362df96bedfa19b07ef1b6d6b80 100644 (file)
@@ -1569,10 +1569,8 @@ static void neigh_proxy_process(struct timer_list *t)
 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
                    struct sk_buff *skb)
 {
-       unsigned long now = jiffies;
-
-       unsigned long sched_next = now + (prandom_u32() %
-                                         NEIGH_VAR(p, PROXY_DELAY));
+       unsigned long sched_next = jiffies +
+                       prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));
 
        if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
                kfree_skb(skb);
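
prandom_u32_max(N) maps a full 32-bit random word into [0, N) with one widening multiply and a shift instead of the '%' the old code used, avoiding a division on this path. A sketch of that scaling with fixed sample values standing in for the PRNG output:

#include <stdint.h>
#include <stdio.h>

/* prandom_u32_max()-style scaling: (u64)rnd * range >> 32. */
static uint32_t scale_u32(uint32_t rnd, uint32_t range)
{
	return (uint32_t)(((uint64_t)rnd * range) >> 32);
}

int main(void)
{
	uint32_t delay_range = 80; /* stand-in for PROXY_DELAY */
	uint32_t samples[] = { 0u, 0x40000000u, 0x80000000u, 0xFFFFFFFFu };

	for (int i = 0; i < 4; i++)
		printf("rnd=%#010x -> %u\n", samples[i],
		       scale_u32(samples[i], delay_range));
	return 0;
}
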
index 999b70c59761d8f08429306da0c377d83d8a1b53..daf502c13d6dafc28db2a84f6f1bcec923c8f4d1 100644 (file)
@@ -1317,8 +1317,8 @@ static const struct attribute_group dql_group = {
 static ssize_t xps_cpus_show(struct netdev_queue *queue,
                             char *buf)
 {
+       int cpu, len, ret, num_tc = 1, tc = 0;
        struct net_device *dev = queue->dev;
-       int cpu, len, num_tc = 1, tc = 0;
        struct xps_dev_maps *dev_maps;
        cpumask_var_t mask;
        unsigned long index;
@@ -1328,22 +1328,31 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
 
        index = get_netdev_queue_index(queue);
 
+       if (!rtnl_trylock())
+               return restart_syscall();
+
        if (dev->num_tc) {
                /* Do not allow XPS on subordinate device directly */
                num_tc = dev->num_tc;
-               if (num_tc < 0)
-                       return -EINVAL;
+               if (num_tc < 0) {
+                       ret = -EINVAL;
+                       goto err_rtnl_unlock;
+               }
 
                /* If queue belongs to subordinate dev use its map */
                dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
 
                tc = netdev_txq_to_tc(dev, index);
-               if (tc < 0)
-                       return -EINVAL;
+               if (tc < 0) {
+                       ret = -EINVAL;
+                       goto err_rtnl_unlock;
+               }
        }
 
-       if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
-               return -ENOMEM;
+       if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+               ret = -ENOMEM;
+               goto err_rtnl_unlock;
+       }
 
        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_cpus_map);
@@ -1366,9 +1375,15 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
        }
        rcu_read_unlock();
 
+       rtnl_unlock();
+
        len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
        free_cpumask_var(mask);
        return len < PAGE_SIZE ? len : -EINVAL;
+
+err_rtnl_unlock:
+       rtnl_unlock();
+       return ret;
 }
 
 static ssize_t xps_cpus_store(struct netdev_queue *queue,
@@ -1396,7 +1411,13 @@ static ssize_t xps_cpus_store(struct netdev_queue *queue,
                return err;
        }
 
+       if (!rtnl_trylock()) {
+               free_cpumask_var(mask);
+               return restart_syscall();
+       }
+
        err = netif_set_xps_queue(dev, mask, index);
+       rtnl_unlock();
 
        free_cpumask_var(mask);
 
@@ -1408,22 +1429,29 @@ static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
 
 static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
 {
+       int j, len, ret, num_tc = 1, tc = 0;
        struct net_device *dev = queue->dev;
        struct xps_dev_maps *dev_maps;
        unsigned long *mask, index;
-       int j, len, num_tc = 1, tc = 0;
 
        index = get_netdev_queue_index(queue);
 
+       if (!rtnl_trylock())
+               return restart_syscall();
+
        if (dev->num_tc) {
                num_tc = dev->num_tc;
                tc = netdev_txq_to_tc(dev, index);
-               if (tc < 0)
-                       return -EINVAL;
+               if (tc < 0) {
+                       ret = -EINVAL;
+                       goto err_rtnl_unlock;
+               }
        }
        mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
-       if (!mask)
-               return -ENOMEM;
+       if (!mask) {
+               ret = -ENOMEM;
+               goto err_rtnl_unlock;
+       }
 
        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_rxqs_map);
@@ -1449,10 +1477,16 @@ static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
 out_no_maps:
        rcu_read_unlock();
 
+       rtnl_unlock();
+
        len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues);
        bitmap_free(mask);
 
        return len < PAGE_SIZE ? len : -EINVAL;
+
+err_rtnl_unlock:
+       rtnl_unlock();
+       return ret;
 }
 
 static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
@@ -1478,10 +1512,17 @@ static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
                return err;
        }
 
+       if (!rtnl_trylock()) {
+               bitmap_free(mask);
+               return restart_syscall();
+       }
+
        cpus_read_lock();
        err = __netif_set_xps_queue(dev, mask, index, true);
        cpus_read_unlock();
 
+       rtnl_unlock();
+
        bitmap_free(mask);
        return err ? : len;
 }
index bb0596c41b3efb8b86de22797891c0e88fc9cbb4..3d6ab194d0f58c909ada67d6bd6dd6f28576c932 100644 (file)
@@ -3439,26 +3439,15 @@ replay:
 
        dev->ifindex = ifm->ifi_index;
 
-       if (ops->newlink) {
+       if (ops->newlink)
                err = ops->newlink(link_net ? : net, dev, tb, data, extack);
-               /* Drivers should call free_netdev() in ->destructor
-                * and unregister it on failure after registration
-                * so that device could be finally freed in rtnl_unlock.
-                */
-               if (err < 0) {
-                       /* If device is not registered at all, free it now */
-                       if (dev->reg_state == NETREG_UNINITIALIZED ||
-                           dev->reg_state == NETREG_UNREGISTERED)
-                               free_netdev(dev);
-                       goto out;
-               }
-       } else {
+       else
                err = register_netdevice(dev);
-               if (err < 0) {
-                       free_netdev(dev);
-                       goto out;
-               }
+       if (err < 0) {
+               free_netdev(dev);
+               goto out;
        }
+
        err = rtnl_configure_link(dev, ifm);
        if (err < 0)
                goto out_unregister;
index f62cae3f75d877adec1efbf9726c42515f3912e8..785daff48030d328b879afe58db35c8d96a58ea7 100644 (file)
@@ -437,7 +437,11 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
 
        len += NET_SKB_PAD;
 
-       if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
+       /* If requested length is either too small or too big,
+        * we use kmalloc() for skb->head allocation.
+        */
+       if (len <= SKB_WITH_OVERHEAD(1024) ||
+           len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
            (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
                skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
                if (!skb)
@@ -501,13 +505,17 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
                                 gfp_t gfp_mask)
 {
-       struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+       struct napi_alloc_cache *nc;
        struct sk_buff *skb;
        void *data;
 
        len += NET_SKB_PAD + NET_IP_ALIGN;
 
-       if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
+       /* If requested length is either too small or too big,
+        * we use kmalloc() for skb->head allocation.
+        */
+       if (len <= SKB_WITH_OVERHEAD(1024) ||
+           len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
            (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
                skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
                if (!skb)
@@ -515,6 +523,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
                goto skb_success;
        }
 
+       nc = this_cpu_ptr(&napi_alloc_cache);
        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        len = SKB_DATA_ALIGN(len);
 
@@ -3442,6 +3451,7 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
        st->root_skb = st->cur_skb = skb;
        st->frag_idx = st->stepped_offset = 0;
        st->frag_data = NULL;
+       st->frag_off = 0;
 }
 EXPORT_SYMBOL(skb_prepare_seq_read);
 
@@ -3496,14 +3506,27 @@ next_skb:
                st->stepped_offset += skb_headlen(st->cur_skb);
 
        while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
+               unsigned int pg_idx, pg_off, pg_sz;
+
                frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
-               block_limit = skb_frag_size(frag) + st->stepped_offset;
 
+               pg_idx = 0;
+               pg_off = skb_frag_off(frag);
+               pg_sz = skb_frag_size(frag);
+
+               if (skb_frag_must_loop(skb_frag_page(frag))) {
+                       pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT;
+                       pg_off = offset_in_page(pg_off + st->frag_off);
+                       pg_sz = min_t(unsigned int, pg_sz - st->frag_off,
+                                                   PAGE_SIZE - pg_off);
+               }
+
+               block_limit = pg_sz + st->stepped_offset;
                if (abs_offset < block_limit) {
                        if (!st->frag_data)
-                               st->frag_data = kmap_atomic(skb_frag_page(frag));
+                               st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx);
 
-                       *data = (u8 *) st->frag_data + skb_frag_off(frag) +
+                       *data = (u8 *)st->frag_data + pg_off +
                                (abs_offset - st->stepped_offset);
 
                        return block_limit - abs_offset;
@@ -3514,8 +3537,12 @@ next_skb:
                        st->frag_data = NULL;
                }
 
-               st->frag_idx++;
-               st->stepped_offset += skb_frag_size(frag);
+               st->stepped_offset += pg_sz;
+               st->frag_off += pg_sz;
+               if (st->frag_off == skb_frag_size(frag)) {
+                       st->frag_off = 0;
+                       st->frag_idx++;
+               }
        }
 
        if (st->frag_data) {
@@ -3655,7 +3682,8 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
        unsigned int delta_truesize = 0;
        unsigned int delta_len = 0;
        struct sk_buff *tail = NULL;
-       struct sk_buff *nskb;
+       struct sk_buff *nskb, *tmp;
+       int err;
 
        skb_push(skb, -skb_network_offset(skb) + offset);
 
@@ -3665,11 +3693,28 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
                nskb = list_skb;
                list_skb = list_skb->next;
 
+               err = 0;
+               if (skb_shared(nskb)) {
+                       tmp = skb_clone(nskb, GFP_ATOMIC);
+                       if (tmp) {
+                               consume_skb(nskb);
+                               nskb = tmp;
+                               err = skb_unclone(nskb, GFP_ATOMIC);
+                       } else {
+                               err = -ENOMEM;
+                       }
+               }
+
                if (!tail)
                        skb->next = nskb;
                else
                        tail->next = nskb;
 
+               if (unlikely(err)) {
+                       nskb->next = list_skb;
+                       goto err_linearize;
+               }
+
                tail = nskb;
 
                delta_len += nskb->len;
index bbdd3c7b6cb5b960e4d107c3f25e7067db938483..b065f0a103ed06e40105967b9926679d5840d00a 100644 (file)
@@ -293,7 +293,7 @@ select_by_hash:
                        i = j = reciprocal_scale(hash, socks);
                        while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
                                i++;
-                               if (i >= reuse->num_socks)
+                               if (i >= socks)
                                        i = 0;
                                if (i == j)
                                        goto out;
index 084e159a12ba6db258e18be4e58779b6dcf49cbf..653e3bc9c87b9161d75a3682d096177d2a3e56bf 100644 (file)
@@ -1765,6 +1765,8 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
        fn = &reply_funcs[dcb->cmd];
        if (!fn->cb)
                return -EOPNOTSUPP;
+       if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN))
+               return -EPERM;
 
        if (!tb[DCB_ATTR_IFNAME])
                return -EINVAL;
index 183003e45762ad312a4f0802351535dd5b115f33..a47e0f9b20d0a98e04486b4104e90b41122eca36 100644 (file)
@@ -353,9 +353,13 @@ static int dsa_port_devlink_setup(struct dsa_port *dp)
 
 static void dsa_port_teardown(struct dsa_port *dp)
 {
+       struct devlink_port *dlp = &dp->devlink_port;
+
        if (!dp->setup)
                return;
 
+       devlink_port_type_clear(dlp);
+
        switch (dp->type) {
        case DSA_PORT_TYPE_UNUSED:
                break;
index 5a0f6fec4271d4ca1484003bff29e651ad67c785..cb3a5cf99b2583f4255471d242b27cd36e34f9e1 100644 (file)
@@ -309,8 +309,18 @@ static struct lock_class_key dsa_master_addr_list_lock_key;
 int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 {
        int mtu = ETH_DATA_LEN + cpu_dp->tag_ops->overhead;
+       struct dsa_switch *ds = cpu_dp->ds;
+       struct device_link *consumer_link;
        int ret;
 
+       /* The DSA master must use SET_NETDEV_DEV for this to work. */
+       consumer_link = device_link_add(ds->dev, dev->dev.parent,
+                                       DL_FLAG_AUTOREMOVE_CONSUMER);
+       if (!consumer_link)
+               netdev_err(dev,
+                          "Failed to create a device link to DSA switch %s\n",
+                          dev_name(ds->dev));
+
        rtnl_lock();
        ret = dev_set_mtu(dev, mtu);
        rtnl_unlock();
index 8b07f3a4f2db2509bed581ab549c09b621c8b7ea..a3271ec3e1627fb4f6e29da0e0fb1a638fe7e789 100644 (file)
@@ -443,7 +443,6 @@ static int esp_output_encap(struct xfrm_state *x, struct sk_buff *skb,
 int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 {
        u8 *tail;
-       u8 *vaddr;
        int nfrags;
        int esph_offset;
        struct page *page;
@@ -485,14 +484,10 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
                        page = pfrag->page;
                        get_page(page);
 
-                       vaddr = kmap_atomic(page);
-
-                       tail = vaddr + pfrag->offset;
+                       tail = page_address(page) + pfrag->offset;
 
                        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
 
-                       kunmap_atomic(vaddr);
-
                        nfrags = skb_shinfo(skb)->nr_frags;
 
                        __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
index cdf6ec5aa45de3e881e803a9b48d5a5dedd6693b..84bb707bd88d84218b9b67996ed26f7c1423ade1 100644 (file)
@@ -292,7 +292,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
                        .flowi4_iif = LOOPBACK_IFINDEX,
                        .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
                        .daddr = ip_hdr(skb)->saddr,
-                       .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
+                       .flowi4_tos = ip_hdr(skb)->tos & IPTOS_RT_MASK,
                        .flowi4_scope = scope,
                        .flowi4_mark = vmark ? skb->mark : 0,
                };
index 66fdbfe5447cdb93e06fe85d94646a6806401e98..5d1e6fe9d8387be37fad18b7c6a7a65fd9c18d27 100644 (file)
@@ -128,7 +128,7 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
         * to 0 and sets the configured key in the
         * inner erspan header field
         */
-       if (greh->protocol == htons(ETH_P_ERSPAN) ||
+       if ((greh->protocol == htons(ETH_P_ERSPAN) && hdr_len != 4) ||
            greh->protocol == htons(ETH_P_ERSPAN2)) {
                struct erspan_base_hdr *ershdr;
 
index fd8b8800a2c3022666f46b9ba2ac984f7cf6b04d..6bd7ca09af03dd5385096f749cf05afecb4b7795 100644 (file)
@@ -851,6 +851,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
                newicsk->icsk_retransmits = 0;
                newicsk->icsk_backoff     = 0;
                newicsk->icsk_probes_out  = 0;
+               newicsk->icsk_probes_tstamp = 0;
 
                /* Deinitialize accept_queue to trap illegal accesses. */
                memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
index 89fff5f59eea441c1b016a4576aa3c4e5a204935..2ed0b01f72f01cc29296ba62f925ba9039b00fa1 100644 (file)
@@ -302,7 +302,7 @@ static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *
        if (skb_is_gso(skb))
                return ip_finish_output_gso(net, sk, skb, mtu);
 
-       if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
+       if (skb->len > mtu || IPCB(skb)->frag_max_size)
                return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
 
        return ip_finish_output2(net, sk, skb);
index ee65c9225178d66948a7b70b3d243499eadbc168..64594aa755f0549081185575c5108e753f50749c 100644 (file)
@@ -759,8 +759,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                goto tx_error;
        }
 
-       if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph,
-                           0, 0, false)) {
+       df = tnl_params->frag_off;
+       if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
+               df |= (inner_iph->frag_off & htons(IP_DF));
+
+       if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, 0, 0, false)) {
                ip_rt_put(rt);
                goto tx_error;
        }
@@ -788,10 +791,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                        ttl = ip4_dst_hoplimit(&rt->dst);
        }
 
-       df = tnl_params->frag_off;
-       if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
-               df |= (inner_iph->frag_off&htons(IP_DF));
-
        max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
                        + rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
        if (max_headroom > dev->needed_headroom)
index 563b62b76a5f18aadafbd6dd7376e18d6285e43c..c576a63d09db1b5412becc51052441a7352f122f 100644 (file)
@@ -1379,7 +1379,7 @@ static int compat_get_entries(struct net *net,
        xt_compat_lock(NFPROTO_ARP);
        t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = t->private;
+               const struct xt_table_info *private = xt_table_get_private_protected(t);
                struct xt_table_info info;
 
                ret = compat_table_info(private, &info);
index 6e2851f8d3a3fa7c488c5e4894a9d0887e76b4cb..e8f6f9d86237635b26b37ef8d473a40a9f88c5b6 100644 (file)
@@ -1589,7 +1589,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
        xt_compat_lock(AF_INET);
        t = xt_find_table_lock(net, AF_INET, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = t->private;
+               const struct xt_table_info *private = xt_table_get_private_protected(t);
                struct xt_table_info info;
                ret = compat_table_info(private, &info);
                if (!ret && get.size == info.size)
index cc23f1ce239c28ac9c12ef1a5ef5316407e2bee9..8cd3224d913e0ca5cc0c227e813b5f279fed5545 100644 (file)
@@ -76,7 +76,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
        flow.daddr = iph->saddr;
        flow.saddr = rpfilter_get_saddr(iph->daddr);
        flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
-       flow.flowi4_tos = RT_TOS(iph->tos);
+       flow.flowi4_tos = iph->tos & IPTOS_RT_MASK;
        flow.flowi4_scope = RT_SCOPE_UNIVERSE;
        flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
 
index 5e1b22d4f939f504371367b1c8543bbec9a0e71b..e53e43aef78540a6f48b3355d90c71a064f4f926 100644 (file)
@@ -627,7 +627,7 @@ static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
        for (i = NHA_GROUP_TYPE + 1; i < __NHA_MAX; ++i) {
                if (!tb[i])
                        continue;
-               if (tb[NHA_FDB])
+               if (i == NHA_FDB)
                        continue;
                NL_SET_ERR_MSG(extack,
                               "No other attributes can be set in nexthop groups");
@@ -1459,8 +1459,10 @@ static struct nexthop *nexthop_create_group(struct net *net,
        return nh;
 
 out_no_nh:
-       for (; i >= 0; --i)
+       for (i--; i >= 0; --i) {
+               list_del(&nhg->nh_entries[i].nh_list);
                nexthop_put(nhg->nh_entries[i].nh);
+       }
 
        kfree(nhg->spare);
        kfree(nhg);
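
The nexthop error path had the common unwind off-by-one: on failure at index i, nothing was taken for slot i yet, so cleanup must start at i - 1 (and here each entry also has to be unlinked from its list before the put). The general acquire-N-or-unwind shape, as a sketch:

#include <stdio.h>
#include <stdlib.h>

#define N 4

static void *grab(int i)
{
	/* Illustrative: fail on the third slot. */
	return (i == 2) ? NULL : malloc(16);
}

int main(void)
{
	void *res[N] = { 0 };
	int i;

	for (i = 0; i < N; i++) {
		res[i] = grab(i);
		if (!res[i])
			goto unwind;
	}
	for (i = 0; i < N; i++)
		free(res[i]);
	return 0;

unwind:
	/* Start at i - 1: slot i was never successfully taken, so a
	 * plain "for (; i >= 0; --i)" would release one too many. */
	for (i--; i >= 0; --i)
		free(res[i]);
	return 1;
}
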
index ed42d2193c5c76bc9d48f36c13e72ca5be8aee1f..32545ecf2ab105739e9a751b5202a7b3c9d6b22e 100644 (file)
@@ -2937,6 +2937,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 
        icsk->icsk_backoff = 0;
        icsk->icsk_probes_out = 0;
+       icsk->icsk_probes_tstamp = 0;
        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        icsk->icsk_rto_min = TCP_RTO_MIN;
        icsk->icsk_delack_max = TCP_DELACK_MAX;
index c7e16b0ed791fcbd864860d6216339542e286929..a7dfca0a38cd7a0cdbe0a1c6d9b17866a33ebdc1 100644 (file)
@@ -3384,6 +3384,7 @@ static void tcp_ack_probe(struct sock *sk)
                return;
        if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) {
                icsk->icsk_backoff = 0;
+               icsk->icsk_probes_tstamp = 0;
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
                /* Socket must be woken up by a subsequent tcp_data_snd_check().
                 * This function is not for random use!
@@ -4396,10 +4397,9 @@ static void tcp_rcv_spurious_retrans(struct sock *sk, const struct sk_buff *skb)
         * The receiver remembers and reflects via DSACKs. Leverage the
         * DSACK state and change the txhash to re-route speculatively.
         */
-       if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq) {
-               sk_rethink_txhash(sk);
+       if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq &&
+           sk_rethink_txhash(sk))
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDUPLICATEDATAREHASH);
-       }
 }
 
 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
index 58207c7769d05693b650e3c93e4ef405a5d4b23a..777306b5bc224d6e36da13641b5cdf4f0dba81fd 100644 (file)
@@ -1595,6 +1595,8 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                tcp_move_syn(newtp, req);
                ireq->ireq_opt = NULL;
        } else {
+               newinet->inet_opt = NULL;
+
                if (!req_unhash && found_dup_sk) {
                        /* This code path should only be executed in the
                         * syncookie case only
@@ -1602,8 +1604,6 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                        bh_unlock_sock(newsk);
                        sock_put(newsk);
                        newsk = NULL;
-               } else {
-                       newinet->inet_opt = NULL;
                }
        }
        return newsk;
@@ -1760,6 +1760,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
        u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
+       u32 tail_gso_size, tail_gso_segs;
        struct skb_shared_info *shinfo;
        const struct tcphdr *th;
        struct tcphdr *thtail;
@@ -1767,6 +1768,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
        unsigned int hdrlen;
        bool fragstolen;
        u32 gso_segs;
+       u32 gso_size;
        int delta;
 
        /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
@@ -1792,13 +1794,6 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
         */
        th = (const struct tcphdr *)skb->data;
        hdrlen = th->doff * 4;
-       shinfo = skb_shinfo(skb);
-
-       if (!shinfo->gso_size)
-               shinfo->gso_size = skb->len - hdrlen;
-
-       if (!shinfo->gso_segs)
-               shinfo->gso_segs = 1;
 
        tail = sk->sk_backlog.tail;
        if (!tail)
@@ -1821,6 +1816,15 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
                goto no_coalesce;
 
        __skb_pull(skb, hdrlen);
+
+       shinfo = skb_shinfo(skb);
+       gso_size = shinfo->gso_size ?: skb->len;
+       gso_segs = shinfo->gso_segs ?: 1;
+
+       shinfo = skb_shinfo(tail);
+       tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);
+       tail_gso_segs = shinfo->gso_segs ?: 1;
+
        if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
                TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
 
@@ -1847,11 +1851,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
                }
 
                /* Not as strict as GRO. We only need to carry mss max value */
-               skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
-                                                skb_shinfo(tail)->gso_size);
-
-               gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
-               skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+               shinfo->gso_size = max(gso_size, tail_gso_size);
+               shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);
 
                sk->sk_backlog.len += delta;
                __NET_INC_STATS(sock_net(sk),
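
The tcp_add_backlog() rework snapshots gso_size/gso_segs with the GNU "?:" (elvis) extension, a ?: b, which evaluates a once and substitutes b only when a is zero; the kernel uses it widely for this default-if-unset pattern. In isolation (GCC/Clang extension, not standard C):

#include <stdio.h>

int main(void)
{
	unsigned int gso_size = 0; /* unset on a non-GSO skb */
	unsigned int len = 1400;

	/* x ?: y evaluates x once; if non-zero it is the result,
	 * otherwise y is. Like x ? x : y without double evaluation. */
	unsigned int effective = gso_size ?: len;

	printf("effective gso_size = %u\n", effective);
	return 0;
}
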
index f322e798a3519153472434a0a4a85449a2da20ce..ab458697881eda4503d6f223480deadf754d2d22 100644 (file)
@@ -4084,6 +4084,7 @@ void tcp_send_probe0(struct sock *sk)
                /* Cancel probe timer, if it is not required. */
                icsk->icsk_probes_out = 0;
                icsk->icsk_backoff = 0;
+               icsk->icsk_probes_tstamp = 0;
                return;
        }
 
index 6c62b9ea1320d9bbd26ed86b9f41de02fee6c491..faa92948441ba28a821034f18a6e4cc252da2c99 100644 (file)
@@ -219,14 +219,8 @@ static int tcp_write_timeout(struct sock *sk)
        int retry_until;
 
        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
-               if (icsk->icsk_retransmits) {
-                       dst_negative_advice(sk);
-               } else {
-                       sk_rethink_txhash(sk);
-                       tp->timeout_rehash++;
-                       __NET_INC_STATS(sock_net(sk),
-                                       LINUX_MIB_TCPTIMEOUTREHASH);
-               }
+               if (icsk->icsk_retransmits)
+                       __dst_negative_advice(sk);
                retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
                expired = icsk->icsk_retransmits >= retry_until;
        } else {
@@ -234,12 +228,7 @@ static int tcp_write_timeout(struct sock *sk)
                        /* Black hole detection */
                        tcp_mtu_probing(icsk, sk);
 
-                       dst_negative_advice(sk);
-               } else {
-                       sk_rethink_txhash(sk);
-                       tp->timeout_rehash++;
-                       __NET_INC_STATS(sock_net(sk),
-                                       LINUX_MIB_TCPTIMEOUTREHASH);
+                       __dst_negative_advice(sk);
                }
 
                retry_until = net->ipv4.sysctl_tcp_retries2;
@@ -270,6 +259,11 @@ static int tcp_write_timeout(struct sock *sk)
                return 1;
        }
 
+       if (sk_rethink_txhash(sk)) {
+               tp->timeout_rehash++;
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
+       }
+
        return 0;
 }
 
@@ -349,6 +343,7 @@ static void tcp_probe_timer(struct sock *sk)
 
        if (tp->packets_out || !skb) {
                icsk->icsk_probes_out = 0;
+               icsk->icsk_probes_tstamp = 0;
                return;
        }
 
@@ -360,13 +355,12 @@ static void tcp_probe_timer(struct sock *sk)
         * corresponding system limit. We also implement similar policy when
         * we use RTO to probe window in tcp_retransmit_timer().
         */
-       if (icsk->icsk_user_timeout) {
-               u32 elapsed = tcp_model_timeout(sk, icsk->icsk_probes_out,
-                                               tcp_probe0_base(sk));
-
-               if (elapsed >= icsk->icsk_user_timeout)
-                       goto abort;
-       }
+       if (!icsk->icsk_probes_tstamp)
+               icsk->icsk_probes_tstamp = tcp_jiffies32;
+       else if (icsk->icsk_user_timeout &&
+                (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
+                msecs_to_jiffies(icsk->icsk_user_timeout))
+               goto abort;
 
        max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
        if (sock_flag(sk, SOCK_DEAD)) {
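
The new icsk_probes_tstamp deadline test uses the standard jiffies idiom: subtract the 32-bit timestamps and interpret the difference as signed, which stays correct across counter wraparound as long as the interval is under 2^31 ticks. Demonstrated with values straddling the wrap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t start = 0xFFFFFFF0u; /* timestamp taken just before wrap */
	uint32_t now   = 0x00000010u; /* current time just after wrap */

	/* (s32)(now - start): the unsigned subtraction wraps, and the
	 * signed cast reads it back as a small positive interval. */
	int32_t elapsed = (int32_t)(now - start);

	printf("elapsed = %d ticks\n", elapsed); /* 32, not a huge value */
	return 0;
}
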
index 7103b0a89756e24203261684e88432615c344581..69ea76578abb95c613ef20f4fc0751e1ff81ac7b 100644 (file)
@@ -2555,7 +2555,8 @@ int udp_v4_early_demux(struct sk_buff *skb)
                 */
                if (!inet_sk(sk)->inet_daddr && in_dev)
                        return ip_mc_validate_source(skb, iph->daddr,
-                                                    iph->saddr, iph->tos,
+                                                    iph->saddr,
+                                                    iph->tos & IPTOS_RT_MASK,
                                                     skb->dev, in_dev, &itag);
        }
        return 0;
index eff2cacd52093962d17bb9798b21ed20d7b1a995..9edc5bb2d531aeffef448ef049ed8a745fe61544 100644 (file)
@@ -2467,8 +2467,9 @@ static void addrconf_add_mroute(struct net_device *dev)
                .fc_ifindex = dev->ifindex,
                .fc_dst_len = 8,
                .fc_flags = RTF_UP,
-               .fc_type = RTN_UNICAST,
+               .fc_type = RTN_MULTICAST,
                .fc_nlinfo.nl_net = dev_net(dev),
+               .fc_protocol = RTPROT_KERNEL,
        };
 
        ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
index 52c2f063529fbf9e33326eb6d3887f8c810b26eb..2b804fcebcc6511d32f18d99da596f9221e98551 100644 (file)
@@ -478,7 +478,6 @@ static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
 int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 {
        u8 *tail;
-       u8 *vaddr;
        int nfrags;
        int esph_offset;
        struct page *page;
@@ -519,14 +518,10 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
                        page = pfrag->page;
                        get_page(page);
 
-                       vaddr = kmap_atomic(page);
-
-                       tail = vaddr + pfrag->offset;
+                       tail = page_address(page) + pfrag->offset;
 
                        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
 
-                       kunmap_atomic(vaddr);
-
                        nfrags = skb_shinfo(skb)->nr_frags;
 
                        __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
index 605cdd38a919a75ea6ee402d4e51f8a29d085b9a..f43e27555725115aa0a0ed4ba3a34869e3fc491f 100644 (file)
@@ -1025,6 +1025,8 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
 {
        struct fib6_table *table = rt->fib6_table;
 
+       /* Flush all cached dst in exception table */
+       rt6_flush_exceptions(rt);
        fib6_drop_pcpu_from(rt, table);
 
        if (rt->nh && !list_empty(&rt->nh_list))
@@ -1927,9 +1929,6 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
        net->ipv6.rt6_stats->fib_rt_entries--;
        net->ipv6.rt6_stats->fib_discarded_routes++;
 
-       /* Flush all cached dst in exception table */
-       rt6_flush_exceptions(rt);
-
        /* Reset round-robin state, if necessary */
        if (rcu_access_pointer(fn->rr_ptr) == rt)
                fn->rr_ptr = NULL;
index 749ad72386b232183315d43ab12efc9b90e841f8..077d43af8226bd202be878885eaaf274776bf648 100644 (file)
@@ -125,8 +125,43 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
        return -EINVAL;
 }
 
+static int
+ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
+                                   struct sk_buff *skb, unsigned int mtu)
+{
+       struct sk_buff *segs, *nskb;
+       netdev_features_t features;
+       int ret = 0;
+
+       /* Please see corresponding comment in ip_finish_output_gso
+        * describing the cases where GSO segment length exceeds the
+        * egress MTU.
+        */
+       features = netif_skb_features(skb);
+       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+       if (IS_ERR_OR_NULL(segs)) {
+               kfree_skb(skb);
+               return -ENOMEM;
+       }
+
+       consume_skb(skb);
+
+       skb_list_walk_safe(segs, segs, nskb) {
+               int err;
+
+               skb_mark_not_on_list(segs);
+               err = ip6_fragment(net, sk, segs, ip6_finish_output2);
+               if (err && ret == 0)
+                       ret = err;
+       }
+
+       return ret;
+}
+
 static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+       unsigned int mtu;
+
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
        /* Policy lookup after SNAT yielded a new policy */
        if (skb_dst(skb)->xfrm) {
@@ -135,7 +170,11 @@ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff
        }
 #endif
 
-       if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
+       mtu = ip6_skb_dst_mtu(skb);
+       if (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))
+               return ip6_finish_output_gso_slowpath_drop(net, sk, skb, mtu);
+
+       if ((skb->len > mtu && !skb_is_gso(skb)) ||
            dst_allfrag(skb_dst(skb)) ||
            (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
                return ip6_fragment(net, sk, skb, ip6_finish_output2);
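
The new slow path above segments an oversized GSO skb in software and then fragments each resulting segment on its own, remembering the first error while still pushing the remaining segments out. A minimal user-space sketch of that error-aggregation shape, with a hypothetical process_one() standing in for ip6_fragment():

#include <stdio.h>

/* Hypothetical stand-in for ip6_fragment() on one software segment. */
static int process_one(int seg)
{
        return seg == 2 ? -1 : 0;     /* pretend segment 2 fails */
}

int main(void)
{
        int ret = 0;

        for (int seg = 0; seg < 4; seg++) {
                int err = process_one(seg);

                if (err && ret == 0)
                        ret = err;    /* remember the first error only */
        }
        printf("aggregate result: %d\n", ret);   /* -1, but all 4 ran */
        return 0;
}
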
index c4f532f4d311873391acc280746a3ce0e91398de..0d453fa9e327bde73da046a941361ce8a0052d35 100644 (file)
@@ -1598,7 +1598,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
        xt_compat_lock(AF_INET6);
        t = xt_find_table_lock(net, AF_INET6, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = t->private;
+               const struct xt_table_info *private = xt_table_get_private_protected(t);
                struct xt_table_info info;
                ret = compat_table_info(private, &info);
                if (!ret && get.size == info.size)
index 2da0ee70377959b1c87e23fae244ac79195ad997..93636867aee28dac0d166a4919ee26c77cb692a3 100644 (file)
@@ -1645,8 +1645,11 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
        }
 
 #ifdef CONFIG_IPV6_SIT_6RD
-       if (ipip6_netlink_6rd_parms(data, &ip6rd))
+       if (ipip6_netlink_6rd_parms(data, &ip6rd)) {
                err = ipip6_tunnel_update_6rd(nt, &ip6rd);
+               if (err < 0)
+                       unregister_netdevice_queue(dev, NULL);
+       }
 #endif
 
        return err;
index 213ea7abc9ab25b51b5c028f79f31ed56752f572..40961889e9c01add01792690e61c16a04647cc4c 100644 (file)
@@ -489,6 +489,7 @@ static int lapb_device_event(struct notifier_block *this, unsigned long event,
                break;
        }
 
+       lapb_put(lapb);
        return NOTIFY_DONE;
 }
 
index 48f144f107d536eb96d08f438ad21e5a9e3c8c1e..9e723d943421907abb6664bbdad92c9b416efde0 100644 (file)
@@ -120,18 +120,17 @@ static ssize_t aqm_write(struct file *file,
 {
        struct ieee80211_local *local = file->private_data;
        char buf[100];
-       size_t len;
 
-       if (count > sizeof(buf))
+       if (count >= sizeof(buf))
                return -EINVAL;
 
        if (copy_from_user(buf, user_buf, count))
                return -EFAULT;
 
-       buf[sizeof(buf) - 1] = '\0';
-       len = strlen(buf);
-       if (len > 0 && buf[len-1] == '\n')
-               buf[len-1] = 0;
+       if (count && buf[count - 1] == '\n')
+               buf[count - 1] = '\0';
+       else
+               buf[count] = '\0';
 
        if (sscanf(buf, "fq_limit %u", &local->fq.limit) == 1)
                return count;
@@ -177,18 +176,17 @@ static ssize_t airtime_flags_write(struct file *file,
 {
        struct ieee80211_local *local = file->private_data;
        char buf[16];
-       size_t len;
 
-       if (count > sizeof(buf))
+       if (count >= sizeof(buf))
                return -EINVAL;
 
        if (copy_from_user(buf, user_buf, count))
                return -EFAULT;
 
-       buf[sizeof(buf) - 1] = 0;
-       len = strlen(buf);
-       if (len > 0 && buf[len - 1] == '\n')
-               buf[len - 1] = 0;
+       if (count && buf[count - 1] == '\n')
+               buf[count - 1] = '\0';
+       else
+               buf[count] = '\0';
 
        if (kstrtou16(buf, 0, &local->airtime_flags))
                return -EINVAL;
@@ -237,20 +235,19 @@ static ssize_t aql_txq_limit_write(struct file *file,
 {
        struct ieee80211_local *local = file->private_data;
        char buf[100];
-       size_t len;
        u32 ac, q_limit_low, q_limit_high, q_limit_low_old, q_limit_high_old;
        struct sta_info *sta;
 
-       if (count > sizeof(buf))
+       if (count >= sizeof(buf))
                return -EINVAL;
 
        if (copy_from_user(buf, user_buf, count))
                return -EFAULT;
 
-       buf[sizeof(buf) - 1] = 0;
-       len = strlen(buf);
-       if (len > 0 && buf[len - 1] == '\n')
-               buf[len - 1] = 0;
+       if (count && buf[count - 1] == '\n')
+               buf[count - 1] = '\0';
+       else
+               buf[count] = '\0';
 
        if (sscanf(buf, "%u %u %u", &ac, &q_limit_low, &q_limit_high) != 3)
                return -EINVAL;
@@ -306,18 +303,17 @@ static ssize_t force_tx_status_write(struct file *file,
 {
        struct ieee80211_local *local = file->private_data;
        char buf[3];
-       size_t len;
 
-       if (count > sizeof(buf))
+       if (count >= sizeof(buf))
                return -EINVAL;
 
        if (copy_from_user(buf, user_buf, count))
                return -EFAULT;
 
-       buf[sizeof(buf) - 1] = '\0';
-       len = strlen(buf);
-       if (len > 0 && buf[len - 1] == '\n')
-               buf[len - 1] = 0;
+       if (count && buf[count - 1] == '\n')
+               buf[count - 1] = '\0';
+       else
+               buf[count] = '\0';
 
        if (buf[0] == '0' && buf[1] == '\0')
                local->force_tx_status = 0;
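
All four debugfs handlers above switch from strlen() on a buffer that may lack a terminator to explicit termination driven by count; rejecting count >= sizeof(buf) up front reserves one byte for the NUL. A self-contained sketch of the corrected pattern (memcpy() stands in for copy_from_user()):

#include <stdio.h>
#include <string.h>

/* Terminate and trim a user-supplied write of 'count' bytes. */
static int parse_write(const char *user, size_t count)
{
        char buf[16];

        if (count >= sizeof(buf))
                return -1;                      /* -EINVAL in the kernel */
        memcpy(buf, user, count);               /* copy_from_user() upstream */

        if (count && buf[count - 1] == '\n')
                buf[count - 1] = '\0';          /* strip trailing newline */
        else
                buf[count] = '\0';              /* terminate unterminated input */

        printf("parsed: \"%s\"\n", buf);
        return 0;
}

int main(void)
{
        parse_write("fq_limit 8192\n", 14);
        return 0;
}
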
index 13b9bcc4865deb38644d15bbe0cdc4ee28f5bdd2..972895e9f22dc5e4b977087bba880f9a40fcdcdb 100644 (file)
@@ -4176,6 +4176,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta)
 
        rcu_read_lock();
        key = rcu_dereference(sta->ptk[sta->ptk_idx]);
+       if (!key)
+               key = rcu_dereference(sdata->default_unicast_key);
        if (key) {
                switch (key->conf.cipher) {
                case WLAN_CIPHER_SUITE_TKIP:
index 6422da6690f795890286113c3e6a46fbe271d4d7..ebb3228ce9718fd52bb2e481874508397fea47d0 100644 (file)
@@ -649,7 +649,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
                if (!skip_hw && tx->key &&
                    tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
                        info->control.hw_key = &tx->key->conf;
-       } else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta &&
+       } else if (ieee80211_is_data_present(hdr->frame_control) && tx->sta &&
                   test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
                return TX_DROP;
        }
@@ -3809,7 +3809,7 @@ void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
                 * get immediately moved to the back of the list on the next
                 * call to ieee80211_next_txq().
                 */
-               if (txqi->txq.sta &&
+               if (txqi->txq.sta && local->airtime_flags &&
                    wiphy_ext_feature_isset(local->hw.wiphy,
                                            NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
                        list_add(&txqi->schedule_order,
@@ -4251,7 +4251,6 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
        struct ethhdr *ehdr = (struct ethhdr *)skb->data;
        struct ieee80211_key *key;
        struct sta_info *sta;
-       bool offload = true;
 
        if (unlikely(skb->len < ETH_HLEN)) {
                kfree_skb(skb);
@@ -4267,18 +4266,22 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
 
        if (unlikely(IS_ERR_OR_NULL(sta) || !sta->uploaded ||
            !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
-               sdata->control_port_protocol == ehdr->h_proto))
-               offload = false;
-       else if ((key = rcu_dereference(sta->ptk[sta->ptk_idx])) &&
-                (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
-                 key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
-               offload = false;
-
-       if (offload)
-               ieee80211_8023_xmit(sdata, dev, sta, key, skb);
-       else
-               ieee80211_subif_start_xmit(skb, dev);
+           sdata->control_port_protocol == ehdr->h_proto))
+               goto skip_offload;
+
+       key = rcu_dereference(sta->ptk[sta->ptk_idx]);
+       if (!key)
+               key = rcu_dereference(sdata->default_unicast_key);
+
+       if (key && (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
+                   key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
+               goto skip_offload;
+
+       ieee80211_8023_xmit(sdata, dev, sta, key, skb);
+       goto out;
 
+skip_offload:
+       ieee80211_subif_start_xmit(skb, dev);
 out:
        rcu_read_unlock();
 
index 09b19aa2f2051a96c3d4a373922b91fb6dd4a0bc..f998a077c7dd04a319c1de11cb37a07fdbb3eb88 100644 (file)
@@ -427,7 +427,7 @@ static bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
 static bool tcp_can_send_ack(const struct sock *ssk)
 {
        return !((1 << inet_sk_state_load(ssk)) &
-              (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE));
+              (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN));
 }
 
 static void mptcp_send_ack(struct mptcp_sock *msk)
@@ -877,6 +877,9 @@ static void __mptcp_wmem_reserve(struct sock *sk, int size)
        struct mptcp_sock *msk = mptcp_sk(sk);
 
        WARN_ON_ONCE(msk->wmem_reserved);
+       if (WARN_ON_ONCE(amount < 0))
+               amount = 0;
+
        if (amount <= sk->sk_forward_alloc)
                goto reserve;
 
@@ -1587,7 +1590,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
                return -EOPNOTSUPP;
 
-       mptcp_lock_sock(sk, __mptcp_wmem_reserve(sk, len));
+       mptcp_lock_sock(sk, __mptcp_wmem_reserve(sk, min_t(size_t, 1 << 20, len)));
 
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
@@ -2639,11 +2642,17 @@ static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
 
 static int mptcp_disconnect(struct sock *sk, int flags)
 {
-       /* Should never be called.
-        * inet_stream_connect() calls ->disconnect, but that
-        * refers to the subflow socket, not the mptcp one.
-        */
-       WARN_ON_ONCE(1);
+       struct mptcp_subflow_context *subflow;
+       struct mptcp_sock *msk = mptcp_sk(sk);
+
+       __mptcp_flush_join_list(msk);
+       mptcp_for_each_subflow(msk, subflow) {
+               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+               lock_sock(ssk);
+               tcp_disconnect(ssk, flags);
+               release_sock(ssk);
+       }
        return 0;
 }
 
@@ -3086,6 +3095,14 @@ bool mptcp_finish_join(struct sock *ssk)
        return true;
 }
 
+static void mptcp_shutdown(struct sock *sk, int how)
+{
+       pr_debug("sk=%p, how=%d", sk, how);
+
+       if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
+               __mptcp_wr_shutdown(sk);
+}
+
 static struct proto mptcp_prot = {
        .name           = "MPTCP",
        .owner          = THIS_MODULE,
@@ -3095,7 +3112,7 @@ static struct proto mptcp_prot = {
        .accept         = mptcp_accept,
        .setsockopt     = mptcp_setsockopt,
        .getsockopt     = mptcp_getsockopt,
-       .shutdown       = tcp_shutdown,
+       .shutdown       = mptcp_shutdown,
        .destroy        = mptcp_destroy,
        .sendmsg        = mptcp_sendmsg,
        .recvmsg        = mptcp_recvmsg,
@@ -3341,43 +3358,6 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
        return mask;
 }
 
-static int mptcp_shutdown(struct socket *sock, int how)
-{
-       struct mptcp_sock *msk = mptcp_sk(sock->sk);
-       struct sock *sk = sock->sk;
-       int ret = 0;
-
-       pr_debug("sk=%p, how=%d", msk, how);
-
-       lock_sock(sk);
-
-       how++;
-       if ((how & ~SHUTDOWN_MASK) || !how) {
-               ret = -EINVAL;
-               goto out_unlock;
-       }
-
-       if (sock->state == SS_CONNECTING) {
-               if ((1 << sk->sk_state) &
-                   (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
-                       sock->state = SS_DISCONNECTING;
-               else
-                       sock->state = SS_CONNECTED;
-       }
-
-       sk->sk_shutdown |= how;
-       if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
-               __mptcp_wr_shutdown(sk);
-
-       /* Wake up anyone sleeping in poll. */
-       sk->sk_state_change(sk);
-
-out_unlock:
-       release_sock(sk);
-
-       return ret;
-}
-
 static const struct proto_ops mptcp_stream_ops = {
        .family            = PF_INET,
        .owner             = THIS_MODULE,
@@ -3391,7 +3371,7 @@ static const struct proto_ops mptcp_stream_ops = {
        .ioctl             = inet_ioctl,
        .gettstamp         = sock_gettstamp,
        .listen            = mptcp_listen,
-       .shutdown          = mptcp_shutdown,
+       .shutdown          = inet_shutdown,
        .setsockopt        = sock_common_setsockopt,
        .getsockopt        = sock_common_getsockopt,
        .sendmsg           = inet_sendmsg,
@@ -3441,7 +3421,7 @@ static const struct proto_ops mptcp_v6_stream_ops = {
        .ioctl             = inet6_ioctl,
        .gettstamp         = sock_gettstamp,
        .listen            = mptcp_listen,
-       .shutdown          = mptcp_shutdown,
+       .shutdown          = inet_shutdown,
        .setsockopt        = sock_common_setsockopt,
        .getsockopt        = sock_common_getsockopt,
        .sendmsg           = inet6_sendmsg,
index 5b1f4ec66dd981c5029dd95a77c1a6566dea8c57..888ccc2d4e34b11b746bd6ec805496ea4dd9a220 100644 (file)
@@ -1120,7 +1120,7 @@ int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev,
        int payload, i, ret;
 
        /* Find the NCSI device */
-       nd = ncsi_find_dev(dev);
+       nd = ncsi_find_dev(orig_dev);
        ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
        if (!ndp)
                return -ENODEV;
index 5f1208ad049eed33d54b6f201b2a17ce19dcbd54..6186358eac7c5a255e48eb9670311694f0d920cd 100644 (file)
@@ -141,20 +141,6 @@ htable_size(u8 hbits)
        return hsize * sizeof(struct hbucket *) + sizeof(struct htable);
 }
 
-/* Compute htable_bits from the user input parameter hashsize */
-static u8
-htable_bits(u32 hashsize)
-{
-       /* Assume that hashsize == 2^htable_bits */
-       u8 bits = fls(hashsize - 1);
-
-       if (jhash_size(bits) != hashsize)
-               /* Round up to the first 2^n value */
-               bits = fls(hashsize);
-
-       return bits;
-}
-
 #ifdef IP_SET_HASH_WITH_NETS
 #if IPSET_NET_COUNT > 1
 #define __CIDR(cidr, i)                (cidr[i])
@@ -640,7 +626,7 @@ mtype_resize(struct ip_set *set, bool retried)
        struct htype *h = set->data;
        struct htable *t, *orig;
        u8 htable_bits;
-       size_t dsize = set->dsize;
+       size_t hsize, dsize = set->dsize;
 #ifdef IP_SET_HASH_WITH_NETS
        u8 flags;
        struct mtype_elem *tmp;
@@ -664,14 +650,12 @@ mtype_resize(struct ip_set *set, bool retried)
 retry:
        ret = 0;
        htable_bits++;
-       if (!htable_bits) {
-               /* In case we have plenty of memory :-) */
-               pr_warn("Cannot increase the hashsize of set %s further\n",
-                       set->name);
-               ret = -IPSET_ERR_HASH_FULL;
-               goto out;
-       }
-       t = ip_set_alloc(htable_size(htable_bits));
+       if (!htable_bits)
+               goto hbwarn;
+       hsize = htable_size(htable_bits);
+       if (!hsize)
+               goto hbwarn;
+       t = ip_set_alloc(hsize);
        if (!t) {
                ret = -ENOMEM;
                goto out;
@@ -813,6 +797,12 @@ cleanup:
        if (ret == -EAGAIN)
                goto retry;
        goto out;
+
+hbwarn:
+       /* In case we have plenty of memory :-) */
+       pr_warn("Cannot increase the hashsize of set %s further\n", set->name);
+       ret = -IPSET_ERR_HASH_FULL;
+       goto out;
 }
 
 /* Get the current number of elements and ext_size in the set  */
@@ -1521,7 +1511,11 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
        if (!h)
                return -ENOMEM;
 
-       hbits = htable_bits(hashsize);
+       /* Compute htable_bits from the user input parameter hashsize.
+        * Assume that hashsize == 2^htable_bits,
+        * otherwise round up to the first 2^n value.
+        */
+       hbits = fls(hashsize - 1);
        hsize = htable_size(hbits);
        if (hsize == 0) {
                kfree(h);
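
The open-coded hbits = fls(hashsize - 1) rounds the user's hashsize up to the next power of two, exactly as the new comment says. A quick user-space check of that arithmetic, modelling the kernel's fls() with __builtin_clz():

#include <stdio.h>

/* Model of the kernel's fls(): index of the highest set bit, 1-based. */
static unsigned int fls32(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        unsigned int sizes[] = { 1024, 1025, 2048, 3000 };

        for (int i = 0; i < 4; i++) {
                unsigned int hbits = fls32(sizes[i] - 1);

                /* 1024 -> 10 bits; 1025 and 2048 -> 11; 3000 -> 12 */
                printf("hashsize %u -> htable_bits %u (%u buckets)\n",
                       sizes[i], hbits, 1u << hbits);
        }
        return 0;
}
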
index 46c5557c1fecfd56bc81c86b85ce1dac2c025f9a..0ee702d374b028df2cc8fdaa3fe1cbd1450819db 100644 (file)
@@ -523,6 +523,9 @@ nf_conntrack_hash_sysctl(struct ctl_table *table, int write,
 {
        int ret;
 
+       /* module_param hashsize could have changed value */
+       nf_conntrack_htable_size_user = nf_conntrack_htable_size;
+
        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret < 0 || !write)
                return ret;
index ea923f8cf9c4258bf0288169a6ecac73f039c25c..b7c3c902290f1486a981c841317334320e01b82c 100644 (file)
@@ -1174,6 +1174,7 @@ static int __init nf_nat_init(void)
        ret = register_pernet_subsys(&nat_net_ops);
        if (ret < 0) {
                nf_ct_extend_unregister(&nat_extend);
+               kvfree(nf_nat_bysource);
                return ret;
        }
 
index 8d5aa0ac45f4d3a22cdc3926df3c21722d6e35b9..15c467f1a9dd9daf84b8d3b21f0f070924d06fb5 100644 (file)
@@ -4162,7 +4162,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
                if (flags & ~(NFT_SET_ANONYMOUS | NFT_SET_CONSTANT |
                              NFT_SET_INTERVAL | NFT_SET_TIMEOUT |
                              NFT_SET_MAP | NFT_SET_EVAL |
-                             NFT_SET_OBJECT | NFT_SET_CONCAT))
+                             NFT_SET_OBJECT | NFT_SET_CONCAT | NFT_SET_EXPR))
                        return -EOPNOTSUPP;
                /* Only one of these operations is supported */
                if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) ==
@@ -4304,6 +4304,10 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
                struct nlattr *tmp;
                int left;
 
+               if (!(flags & NFT_SET_EXPR)) {
+                       err = -EINVAL;
+                       goto err_set_alloc_name;
+               }
                i = 0;
                nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
                        if (i == NFT_SET_EXPR_MAX) {
@@ -5254,8 +5258,8 @@ static int nft_set_elem_expr_clone(const struct nft_ctx *ctx,
        return 0;
 
 err_expr:
-       for (k = i - 1; k >= 0; k++)
-               nft_expr_destroy(ctx, expr_array[i]);
+       for (k = i - 1; k >= 0; k--)
+               nft_expr_destroy(ctx, expr_array[k]);
 
        return -ENOMEM;
 }
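
The two one-character fixes above (k++ to k--, expr_array[i] to expr_array[k]) restore the usual partial-failure unwind: when element i fails to initialize, destroy elements i-1 down to 0 and nothing else. A minimal sketch of the idiom with hypothetical make()/destroy() helpers:

#include <stdlib.h>

/* Hypothetical allocate/release pair standing in for expr clone/destroy. */
static void *make(int i)     { return i < 3 ? malloc(8) : NULL; }
static void destroy(void *p) { free(p); }

static int init_array(void **arr, int n)
{
        int i, k;

        for (i = 0; i < n; i++) {
                arr[i] = make(i);
                if (!arr[i])
                        goto err;
        }
        return 0;

err:
        /* Unwind only what was built: i-1 down to 0, indexed by k. */
        for (k = i - 1; k >= 0; k--)
                destroy(arr[k]);
        return -1;
}

int main(void)
{
        void *arr[5];

        init_array(arr, 5);   /* make() fails at i == 3; arr[2..0] freed */
        return 0;
}
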
index 983a1d5ca3ab5bb9b3f1cf8dc59208532bbd6d5b..0b053f75cd6047c1ba619dc540b7cf544699786d 100644 (file)
@@ -19,6 +19,7 @@ struct nft_dynset {
        enum nft_registers              sreg_key:8;
        enum nft_registers              sreg_data:8;
        bool                            invert;
+       bool                            expr;
        u8                              num_exprs;
        u64                             timeout;
        struct nft_expr                 *expr_array[NFT_SET_EXPR_MAX];
@@ -175,11 +176,12 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
 
        if (tb[NFTA_DYNSET_FLAGS]) {
                u32 flags = ntohl(nla_get_be32(tb[NFTA_DYNSET_FLAGS]));
-
-               if (flags & ~NFT_DYNSET_F_INV)
-                       return -EINVAL;
+               if (flags & ~(NFT_DYNSET_F_INV | NFT_DYNSET_F_EXPR))
+                       return -EOPNOTSUPP;
                if (flags & NFT_DYNSET_F_INV)
                        priv->invert = true;
+               if (flags & NFT_DYNSET_F_EXPR)
+                       priv->expr = true;
        }
 
        set = nft_set_lookup_global(ctx->net, ctx->table,
@@ -210,7 +212,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
        timeout = 0;
        if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
                if (!(set->flags & NFT_SET_TIMEOUT))
-                       return -EINVAL;
+                       return -EOPNOTSUPP;
 
                err = nf_msecs_to_jiffies64(tb[NFTA_DYNSET_TIMEOUT], &timeout);
                if (err)
@@ -224,7 +226,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
 
        if (tb[NFTA_DYNSET_SREG_DATA] != NULL) {
                if (!(set->flags & NFT_SET_MAP))
-                       return -EINVAL;
+                       return -EOPNOTSUPP;
                if (set->dtype == NFT_DATA_VERDICT)
                        return -EOPNOTSUPP;
 
@@ -261,6 +263,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
                struct nlattr *tmp;
                int left;
 
+               if (!priv->expr)
+                       return -EINVAL;
+
                i = 0;
                nla_for_each_nested(tmp, tb[NFTA_DYNSET_EXPRESSIONS], left) {
                        if (i == NFT_SET_EXPR_MAX) {
index 37253d399c6b8dd3ef1da16ab2ed78fd5567e2c6..0d5c422f87452f70c378f3144ecee13fcdcb9bb4 100644 (file)
@@ -115,6 +115,9 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
        } cfg;
        int ret;
 
+       if (strnlen(info->name, sizeof(est->name)) >= sizeof(est->name))
+               return -ENAMETOOLONG;
+
        net_get_random_once(&jhash_rnd, sizeof(jhash_rnd));
 
        mutex_lock(&xn->hash_lock);
index e64727e1a72f9c2ffb830de8492b1e64d2d7963b..02a1f13f07980fd16bef7008a184c891369f3626 100644 (file)
@@ -508,7 +508,7 @@ static int nci_open_device(struct nci_dev *ndev)
                };
                unsigned long opt = 0;
 
-               if (!(ndev->nci_ver & NCI_VER_2_MASK))
+               if (ndev->nci_ver & NCI_VER_2_MASK)
                        opt = (unsigned long)&nci_init_v2_cmd;
 
                rc = __nci_request(ndev, nci_init_req, opt,
index de8e8dbbdeb8c476addbb1d480aeb1ac848812b1..6bbc7a4485938304bb04fb68356395f3259805fc 100644 (file)
@@ -4595,7 +4595,9 @@ static void packet_seq_stop(struct seq_file *seq, void *v)
 static int packet_seq_show(struct seq_file *seq, void *v)
 {
        if (v == SEQ_START_TOKEN)
-               seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
+               seq_printf(seq,
+                          "%*sRefCnt Type Proto  Iface R Rmem   User   Inode\n",
+                          IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
        else {
                struct sock *s = sk_entry(v);
                const struct packet_sock *po = pkt_sk(s);
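
The %*s conversion above left-justifies the "sk" column header: a negative field width in printf() means left-justify within that many columns, and the 17 vs 9 appears to match a kernel %p pointer (16 or 8 hex digits) plus one space on 64- and 32-bit builds. A quick illustration:

#include <stdio.h>

int main(void)
{
        /* A negative width in %*s left-justifies within that many
         * columns, so the header spans the pointer column exactly. */
        printf("%*sRefCnt Type\n", -17, "sk");
        printf("%016lx 0002   3\n", (unsigned long)0x1234abcd);
        return 0;
}
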
index 56aaf8cb6527e2f05e8987ddcdbd8bffd99de0d7..8d00dfe8139e82b3eb1db7b956add80915485695 100644 (file)
@@ -755,7 +755,7 @@ static void qrtr_ns_data_ready(struct sock *sk)
        queue_work(qrtr_ns.workqueue, &qrtr_ns.work);
 }
 
-void qrtr_ns_init(void)
+int qrtr_ns_init(void)
 {
        struct sockaddr_qrtr sq;
        int ret;
@@ -766,7 +766,7 @@ void qrtr_ns_init(void)
        ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM,
                               PF_QIPCRTR, &qrtr_ns.sock);
        if (ret < 0)
-               return;
+               return ret;
 
        ret = kernel_getsockname(qrtr_ns.sock, (struct sockaddr *)&sq);
        if (ret < 0) {
@@ -797,12 +797,13 @@ void qrtr_ns_init(void)
        if (ret < 0)
                goto err_wq;
 
-       return;
+       return 0;
 
 err_wq:
        destroy_workqueue(qrtr_ns.workqueue);
 err_sock:
        sock_release(qrtr_ns.sock);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(qrtr_ns_init);
 
index f4ab3ca6d73b3cf4e9fc137572279b3620cbb908..b34358282f3798157206e3836517bba937daa5b9 100644 (file)
@@ -1287,13 +1287,19 @@ static int __init qrtr_proto_init(void)
                return rc;
 
        rc = sock_register(&qrtr_family);
-       if (rc) {
-               proto_unregister(&qrtr_proto);
-               return rc;
-       }
+       if (rc)
+               goto err_proto;
 
-       qrtr_ns_init();
+       rc = qrtr_ns_init();
+       if (rc)
+               goto err_sock;
 
+       return 0;
+
+err_sock:
+       sock_unregister(qrtr_family.family);
+err_proto:
+       proto_unregister(&qrtr_proto);
        return rc;
 }
 postcore_initcall(qrtr_proto_init);
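
qrtr_proto_init() now follows the standard init ladder: each failure point jumps to a label that unwinds, in reverse order, exactly the steps that already succeeded. A compact sketch of the shape with hypothetical setup/undo pairs (setup_c() fails deliberately):

/* Hypothetical init ladder; each err_* label undoes earlier steps only. */
static int setup_a(void) { return 0; }
static void undo_a(void) { }
static int setup_b(void) { return 0; }
static void undo_b(void) { }
static int setup_c(void) { return -1; }    /* deliberate failure */

static int proto_init(void)
{
        int rc;

        rc = setup_a();
        if (rc)
                return rc;

        rc = setup_b();
        if (rc)
                goto err_a;

        rc = setup_c();
        if (rc)
                goto err_b;

        return 0;

err_b:
        undo_b();                           /* reverse order of setup */
err_a:
        undo_a();
        return rc;
}

int main(void)
{
        return proto_init() ? 1 : 0;
}
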
index dc2b67f17927161e8928c0a5e6a2ad0a44378fdc..3f2d28696062a56201f8774ee50fd1c3daa50708 100644 (file)
@@ -29,7 +29,7 @@ void qrtr_endpoint_unregister(struct qrtr_endpoint *ep);
 
 int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len);
 
-void qrtr_ns_init(void);
+int qrtr_ns_init(void);
 
 void qrtr_ns_remove(void);
 
index 667c44aa5a63cb1abd90564ac67dccf094a3c0b2..dc201363f2c485fe12893a212829c45c91a99edd 100644 (file)
@@ -430,7 +430,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
                return;
        }
 
-       if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
+       if (state == RXRPC_CALL_SERVER_RECV_REQUEST) {
                unsigned long timo = READ_ONCE(call->next_req_timo);
                unsigned long now, expect_req_by;
 
index 9631aa8543b51e31ee9da5bb679b29cd39a401ec..8d2073e0e3da5deb0c207670052df280d45fbfd6 100644 (file)
@@ -598,7 +598,7 @@ static long rxrpc_read(const struct key *key,
                default: /* we have a ticket we can't encode */
                        pr_err("Unsupported key token type (%u)\n",
                               token->security_index);
-                       continue;
+                       return -ENOPKG;
                }
 
                _debug("token[%u]: toksize=%u", ntoks, toksize);
@@ -674,7 +674,9 @@ static long rxrpc_read(const struct key *key,
                        break;
 
                default:
-                       break;
+                       pr_err("Unsupported key token type (%u)\n",
+                              token->security_index);
+                       return -ENOPKG;
                }
 
                ASSERTCMP((unsigned long)xdr - (unsigned long)oldxdr, ==,
index 1319986693fc8826330e0a7dc49143d1d7518d64..84f932532db7dc39e23d946cd73e97ba042795a4 100644 (file)
@@ -1272,6 +1272,10 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
 
                nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
                msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
+               if (!nla_ok(nla_opt_msk, msk_depth)) {
+                       NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
+                       return -EINVAL;
+               }
        }
 
        nla_for_each_attr(nla_opt_key, nla_enc_key,
@@ -1307,9 +1311,6 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
                                NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
                                return -EINVAL;
                        }
-
-                       if (msk_depth)
-                               nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
                        break;
                case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
                        if (key->enc_opts.dst_opt_type) {
@@ -1340,9 +1341,6 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
                                NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
                                return -EINVAL;
                        }
-
-                       if (msk_depth)
-                               nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
                        break;
                case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
                        if (key->enc_opts.dst_opt_type) {
@@ -1373,14 +1371,20 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
                                NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
                                return -EINVAL;
                        }
-
-                       if (msk_depth)
-                               nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
                        break;
                default:
                        NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
                        return -EINVAL;
                }
+
+               if (!msk_depth)
+                       continue;
+
+               if (!nla_ok(nla_opt_msk, msk_depth)) {
+                       NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
+                       return -EINVAL;
+               }
+               nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
        }
 
        return 0;
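
The flower change validates the mask stream with nla_ok() before every advance and hoists the nla_next() step out of the per-type cases so no case can skip it. The same check-before-step shape applies to any TLV walk; a loose user-space model with a simplified attribute header (little-endian layout assumed for the byte buffer):

#include <stdio.h>

/* Simplified stand-in for struct nlattr: length includes the header. */
struct attr { unsigned short len; unsigned short type; };

static int attr_ok(const struct attr *a, int rem)
{
        return rem >= (int)sizeof(*a) &&
               a->len >= sizeof(*a) && a->len <= (unsigned int)rem;
}

static const struct attr *attr_next(const struct attr *a, int *rem)
{
        *rem -= a->len;
        return (const struct attr *)((const char *)a + a->len);
}

int main(void)
{
        /* Two attributes: {len 8, type 1} + 4 payload bytes,
         * then {len 6, type 2} + 2 payload bytes. */
        unsigned char buf[] = { 8, 0, 1, 0, 0, 0, 0, 0,
                                6, 0, 2, 0, 0, 0 };
        const struct attr *a = (const struct attr *)buf;
        int rem = sizeof(buf);

        /* Check before every step, as the fix does with nla_ok(). */
        while (attr_ok(a, rem)) {
                printf("type %u, len %u\n", a->type, a->len);
                a = attr_next(a, &rem);
        }
        return 0;
}
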
index 78bec347b8b66f660e620dd715d0eb68f9bcd2d3..c4007b9cd16d6a200d943e3e0536d6b20022ba77 100644 (file)
@@ -366,9 +366,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
        if (tb[TCA_TCINDEX_MASK])
                cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
 
-       if (tb[TCA_TCINDEX_SHIFT])
+       if (tb[TCA_TCINDEX_SHIFT]) {
                cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
-
+               if (cp->shift > 16) {
+                       err = -EINVAL;
+                       goto errout;
+               }
+       }
        if (!cp->hash) {
                /* Hash not specified, use perfect hash if the upper limit
                 * of the hashing index is below the threshold.
index 51cb553e4317a3e2bca1996e0df004aab8111d58..6fe4e5cc807c90b046a16f014df43bfe841cbc43 100644 (file)
@@ -412,7 +412,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
 {
        struct qdisc_rate_table *rtab;
 
-       if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
+       if (tab == NULL || r->rate == 0 ||
+           r->cell_log == 0 || r->cell_log >= 32 ||
            nla_len(tab) != TC_RTAB_SIZE) {
                NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
                return NULL;
index bd618b00d3193242b1a166b5c48b3bccbcef5d2e..50f680f03a547ebe952eae9657b40d4de0fc5c9f 100644 (file)
@@ -362,7 +362,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
 
        ctl = nla_data(tb[TCA_CHOKE_PARMS]);
 
-       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
                return -EINVAL;
 
        if (ctl->limit > CHOKE_MAX_QUEUE)
index 8599c6f31b057f494f64941929a2012e5fddac5a..e0bc77533acc39d90dbbffc88241ea3063e27d3f 100644 (file)
@@ -480,7 +480,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
        struct gred_sched *table = qdisc_priv(sch);
        struct gred_sched_data *q = table->tab[dp];
 
-       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) {
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log)) {
                NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
                return -EINVAL;
        }
index e89fab6ccb34f733a474111a08128076b8c77198..b4ae34d7aa965decf541660ee195df92f646f799 100644 (file)
@@ -250,7 +250,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
        max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
 
        ctl = nla_data(tb[TCA_RED_PARMS]);
-       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
                return -EINVAL;
 
        err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
index bca2be57d9fc1785d0c246861641c63021f2764c..b25e51440623bce56628ff33142c63739c4ac239 100644 (file)
@@ -647,7 +647,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
        }
 
        if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
-                                       ctl_v1->Wlog))
+                                       ctl_v1->Wlog, ctl_v1->Scell_log))
                return -EINVAL;
        if (ctl_v1 && ctl_v1->qth_min) {
                p = kmalloc(sizeof(*p), GFP_KERNEL);
index c74817ec9964b7eb744f972cca0777111a78f0f2..6f775275826a45a19c519fb06b80c53b304e61ce 100644 (file)
@@ -1605,8 +1605,9 @@ static void taprio_reset(struct Qdisc *sch)
 
        hrtimer_cancel(&q->advance_timer);
        if (q->qdiscs) {
-               for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
-                       qdisc_reset(q->qdiscs[i]);
+               for (i = 0; i < dev->num_tx_queues; i++)
+                       if (q->qdiscs[i])
+                               qdisc_reset(q->qdiscs[i]);
        }
        sch->qstats.backlog = 0;
        sch->q.qlen = 0;
@@ -1626,7 +1627,7 @@ static void taprio_destroy(struct Qdisc *sch)
        taprio_disable_offload(dev, q, NULL);
 
        if (q->qdiscs) {
-               for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
+               for (i = 0; i < dev->num_tx_queues; i++)
                        qdisc_put(q->qdiscs[i]);
 
                kfree(q->qdiscs);
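
The taprio fix matters because the old condition i < dev->num_tx_queues && q->qdiscs[i] stops at the first NULL slot, so in the destroy path every qdisc behind a hole was leaked. A tiny demonstration of the difference between the two loop shapes:

#include <stdio.h>

int main(void)
{
        const char *qdiscs[] = { "q0", NULL, "q2", "q3" };
        int n = 4, i, old = 0, fixed = 0;

        /* Old form: terminates at the first NULL entry. */
        for (i = 0; i < n && qdiscs[i]; i++)
                old++;

        /* Fixed form: visits every slot, skipping only the holes. */
        for (i = 0; i < n; i++)
                if (qdiscs[i])
                        fixed++;

        printf("old: %d visited, fixed: %d visited\n", old, fixed);
        return 0;
}
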
index 59342b519e347cdca6681b80ab65ef22121d60c6..0df85a12651e96e87c97031316cd94585456cc08 100644 (file)
@@ -246,7 +246,8 @@ int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb)
                goto errattr;
        smc_clc_get_hostname(&host);
        if (host) {
-               snprintf(hostname, sizeof(hostname), "%s", host);
+               memcpy(hostname, host, SMC_MAX_HOSTNAME_LEN);
+               hostname[SMC_MAX_HOSTNAME_LEN] = 0;
                if (nla_put_string(skb, SMC_NLA_SYS_LOCAL_HOST, hostname))
                        goto errattr;
        }
@@ -257,7 +258,8 @@ int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb)
                smc_ism_get_system_eid(smcd_dev, &seid);
        mutex_unlock(&smcd_dev_list.mutex);
        if (seid && smc_ism_is_v2_capable()) {
-               snprintf(smc_seid, sizeof(smc_seid), "%s", seid);
+               memcpy(smc_seid, seid, SMC_MAX_EID_LEN);
+               smc_seid[SMC_MAX_EID_LEN] = 0;
                if (nla_put_string(skb, SMC_NLA_SYS_SEID, smc_seid))
                        goto errattr;
        }
@@ -295,7 +297,8 @@ static int smc_nl_fill_lgr(struct smc_link_group *lgr,
                goto errattr;
        if (nla_put_u8(skb, SMC_NLA_LGR_R_VLAN_ID, lgr->vlan_id))
                goto errattr;
-       snprintf(smc_target, sizeof(smc_target), "%s", lgr->pnet_id);
+       memcpy(smc_target, lgr->pnet_id, SMC_MAX_PNETID_LEN);
+       smc_target[SMC_MAX_PNETID_LEN] = 0;
        if (nla_put_string(skb, SMC_NLA_LGR_R_PNETID, smc_target))
                goto errattr;
 
@@ -312,7 +315,7 @@ static int smc_nl_fill_lgr_link(struct smc_link_group *lgr,
                                struct sk_buff *skb,
                                struct netlink_callback *cb)
 {
-       char smc_ibname[IB_DEVICE_NAME_MAX + 1];
+       char smc_ibname[IB_DEVICE_NAME_MAX];
        u8 smc_gid_target[41];
        struct nlattr *attrs;
        u32 link_uid = 0;
@@ -461,7 +464,8 @@ static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
                goto errattr;
        if (nla_put_u32(skb, SMC_NLA_LGR_D_CHID, smc_ism_get_chid(lgr->smcd)))
                goto errattr;
-       snprintf(smc_pnet, sizeof(smc_pnet), "%s", lgr->smcd->pnetid);
+       memcpy(smc_pnet, lgr->smcd->pnetid, SMC_MAX_PNETID_LEN);
+       smc_pnet[SMC_MAX_PNETID_LEN] = 0;
        if (nla_put_string(skb, SMC_NLA_LGR_D_PNETID, smc_pnet))
                goto errattr;
 
@@ -474,10 +478,12 @@ static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
                goto errv2attr;
        if (nla_put_u8(skb, SMC_NLA_LGR_V2_OS, lgr->peer_os))
                goto errv2attr;
-       snprintf(smc_host, sizeof(smc_host), "%s", lgr->peer_hostname);
+       memcpy(smc_host, lgr->peer_hostname, SMC_MAX_HOSTNAME_LEN);
+       smc_host[SMC_MAX_HOSTNAME_LEN] = 0;
        if (nla_put_string(skb, SMC_NLA_LGR_V2_PEER_HOST, smc_host))
                goto errv2attr;
-       snprintf(smc_eid, sizeof(smc_eid), "%s", lgr->negotiated_eid);
+       memcpy(smc_eid, lgr->negotiated_eid, SMC_MAX_EID_LEN);
+       smc_eid[SMC_MAX_EID_LEN] = 0;
        if (nla_put_string(skb, SMC_NLA_LGR_V2_NEG_EID, smc_eid))
                goto errv2attr;
 
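The snprintf()-to-memcpy() conversions above exist because the SMC pnetid/EID/hostname fields are fixed-width and not guaranteed to carry a NUL terminator, so printing them through "%s" could read past the field. A minimal illustration with a hypothetical 4-byte field:

#include <stdio.h>
#include <string.h>

#define FIELD_LEN 4

int main(void)
{
        /* Fixed-width field filled to capacity: no NUL inside it. */
        char pnetid[FIELD_LEN] = { 'N', 'E', 'T', '1' };
        char out[FIELD_LEN + 1];

        /* snprintf(out, sizeof(out), "%s", pnetid) would keep reading
         * past pnetid[3] looking for a terminator. The safe form: */
        memcpy(out, pnetid, FIELD_LEN);
        out[FIELD_LEN] = '\0';

        printf("%s\n", out);
        return 0;
}
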
index ddd7fac98b1d6558a5968d5e4a50e899d5492674..7d7ba0320d5aefc539838c0bf13e365e181d43a7 100644 (file)
@@ -371,8 +371,8 @@ static int smc_nl_handle_dev_port(struct sk_buff *skb,
        if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR,
                       smcibdev->pnetid_by_user[port]))
                goto errattr;
-       snprintf(smc_pnet, sizeof(smc_pnet), "%s",
-                (char *)&smcibdev->pnetid[port]);
+       memcpy(smc_pnet, &smcibdev->pnetid[port], SMC_MAX_PNETID_LEN);
+       smc_pnet[SMC_MAX_PNETID_LEN] = 0;
        if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet))
                goto errattr;
        if (nla_put_u32(skb, SMC_NLA_DEV_PORT_NETDEV,
@@ -414,7 +414,7 @@ static int smc_nl_handle_smcr_dev(struct smc_ib_device *smcibdev,
                                  struct sk_buff *skb,
                                  struct netlink_callback *cb)
 {
-       char smc_ibname[IB_DEVICE_NAME_MAX + 1];
+       char smc_ibname[IB_DEVICE_NAME_MAX];
        struct smc_pci_dev smc_pci_dev;
        struct pci_dev *pci_dev;
        unsigned char is_crit;
index 524ef64a191a5b31788c2750fbcb58876ee77874..9c6e95882553eb97374e3d080b581308bba91a89 100644 (file)
@@ -250,7 +250,8 @@ static int smc_nl_handle_smcd_dev(struct smcd_dev *smcd,
                goto errattr;
        if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR, smcd->pnetid_by_user))
                goto errportattr;
-       snprintf(smc_pnet, sizeof(smc_pnet), "%s", smcd->pnetid);
+       memcpy(smc_pnet, smcd->pnetid, SMC_MAX_PNETID_LEN);
+       smc_pnet[SMC_MAX_PNETID_LEN] = 0;
        if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet))
                goto errportattr;
 
index 010dcb876f9d72dc85b5e4ff327e927999b7d242..6e4dbd577a39fa701460ade9cd1fde4563b63057 100644 (file)
@@ -185,7 +185,7 @@ static int rpc_parse_scope_id(struct net *net, const char *buf,
                        scope_id = dev->ifindex;
                        dev_put(dev);
                } else {
-                       if (kstrtou32(p, 10, &scope_id) == 0) {
+                       if (kstrtou32(p, 10, &scope_id) != 0) {
                                kfree(p);
                                return 0;
                        }
index 5fb9164aa69059f77158a9f6ec796381474641ae..dcc50ae545506f39d04908f6168c36ddda350f6f 100644 (file)
@@ -857,6 +857,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
        err = -EAGAIN;
        if (len <= 0)
                goto out_release;
+       trace_svc_xdr_recvfrom(&rqstp->rq_arg);
 
        clear_bit(XPT_OLD, &xprt->xpt_flags);
 
@@ -866,7 +867,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 
        if (serv->sv_stats)
                serv->sv_stats->netcnt++;
-       trace_svc_xdr_recvfrom(rqstp, &rqstp->rq_arg);
        return len;
 out_release:
        rqstp->rq_res.len = 0;
@@ -904,7 +904,7 @@ int svc_send(struct svc_rqst *rqstp)
        xb->len = xb->head[0].iov_len +
                xb->page_len +
                xb->tail[0].iov_len;
-       trace_svc_xdr_sendto(rqstp, xb);
+       trace_svc_xdr_sendto(rqstp->rq_xid, xb);
        trace_svc_stats_latency(rqstp);
 
        len = xprt->xpt_ops->xpo_sendto(rqstp);
index b248f2349437da03e1d2e7e5f4d44886779eef90..c9766d07eb81a1cdee4cad8bfc4a393114543e6f 100644 (file)
@@ -1062,6 +1062,90 @@ err_noclose:
        return 0;       /* record not complete */
 }
 
+static int svc_tcp_send_kvec(struct socket *sock, const struct kvec *vec,
+                             int flags)
+{
+       return kernel_sendpage(sock, virt_to_page(vec->iov_base),
+                              offset_in_page(vec->iov_base),
+                              vec->iov_len, flags);
+}
+
+/*
+ * kernel_sendpage() is used exclusively to reduce the number of
+ * copy operations in this path. Therefore the caller must ensure
+ * that the pages backing @xdr are unchanging.
+ *
+ * In addition, the logic assumes that .bv_len is never larger
+ * than PAGE_SIZE.
+ */
+static int svc_tcp_sendmsg(struct socket *sock, struct msghdr *msg,
+                          struct xdr_buf *xdr, rpc_fraghdr marker,
+                          unsigned int *sentp)
+{
+       const struct kvec *head = xdr->head;
+       const struct kvec *tail = xdr->tail;
+       struct kvec rm = {
+               .iov_base       = &marker,
+               .iov_len        = sizeof(marker),
+       };
+       int flags, ret;
+
+       *sentp = 0;
+       xdr_alloc_bvec(xdr, GFP_KERNEL);
+
+       msg->msg_flags = MSG_MORE;
+       ret = kernel_sendmsg(sock, msg, &rm, 1, rm.iov_len);
+       if (ret < 0)
+               return ret;
+       *sentp += ret;
+       if (ret != rm.iov_len)
+               return -EAGAIN;
+
+       flags = head->iov_len < xdr->len ? MSG_MORE | MSG_SENDPAGE_NOTLAST : 0;
+       ret = svc_tcp_send_kvec(sock, head, flags);
+       if (ret < 0)
+               return ret;
+       *sentp += ret;
+       if (ret != head->iov_len)
+               goto out;
+
+       if (xdr->page_len) {
+               unsigned int offset, len, remaining;
+               struct bio_vec *bvec;
+
+               bvec = xdr->bvec;
+               offset = xdr->page_base;
+               remaining = xdr->page_len;
+               flags = MSG_MORE | MSG_SENDPAGE_NOTLAST;
+               while (remaining > 0) {
+                       if (remaining <= PAGE_SIZE && tail->iov_len == 0)
+                               flags = 0;
+                       len = min(remaining, bvec->bv_len);
+                       ret = kernel_sendpage(sock, bvec->bv_page,
+                                             bvec->bv_offset + offset,
+                                             len, flags);
+                       if (ret < 0)
+                               return ret;
+                       *sentp += ret;
+                       if (ret != len)
+                               goto out;
+                       remaining -= len;
+                       offset = 0;
+                       bvec++;
+               }
+       }
+
+       if (tail->iov_len) {
+               ret = svc_tcp_send_kvec(sock, tail, 0);
+               if (ret < 0)
+                       return ret;
+               *sentp += ret;
+       }
+
+out:
+       return 0;
+}
+
 /**
  * svc_tcp_sendto - Send out a reply on a TCP socket
  * @rqstp: completed svc_rqst
@@ -1089,7 +1173,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
        mutex_lock(&xprt->xpt_mutex);
        if (svc_xprt_is_dead(xprt))
                goto out_notconn;
-       err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, marker, &sent);
+       err = svc_tcp_sendmsg(svsk->sk_sock, &msg, xdr, marker, &sent);
        xdr_free_bvec(xdr);
        trace_svcsock_tcp_send(xprt, err < 0 ? err : sent);
        if (err < 0 || sent != (xdr->len + sizeof(marker)))
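
For the marker sent ahead of the head kvec: an RPC-over-TCP record marker is a 4-byte big-endian word whose top bit flags the last fragment of a record and whose low 31 bits carry the fragment length. A small encoding sketch of that framing (the helper name is illustrative):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Illustrative encoder: high bit = last fragment, low 31 bits = length. */
static uint32_t rpc_marker(uint32_t len, int last)
{
        return htonl((last ? 0x80000000u : 0u) | (len & 0x7fffffffu));
}

int main(void)
{
        uint32_t wire = rpc_marker(1420, 1);

        /* 1420 = 0x58c; with the last-fragment bit set: 0x8000058c */
        printf("marker: %08x\n", ntohl(wire));
        return 0;
}
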
index 6ae2140eb4f7498f262bcb722f5d365188b20794..11510925943020b96436ab3a371f7e9e2a100dfe 100644 (file)
@@ -1030,7 +1030,6 @@ void tipc_link_reset(struct tipc_link *l)
 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                   struct sk_buff_head *xmitq)
 {
-       struct tipc_msg *hdr = buf_msg(skb_peek(list));
        struct sk_buff_head *backlogq = &l->backlogq;
        struct sk_buff_head *transmq = &l->transmq;
        struct sk_buff *skb, *_skb;
@@ -1038,13 +1037,18 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
        u16 ack = l->rcv_nxt - 1;
        u16 seqno = l->snd_nxt;
        int pkt_cnt = skb_queue_len(list);
-       int imp = msg_importance(hdr);
        unsigned int mss = tipc_link_mss(l);
        unsigned int cwin = l->window;
        unsigned int mtu = l->mtu;
+       struct tipc_msg *hdr;
        bool new_bundle;
        int rc = 0;
+       int imp;
+
+       if (pkt_cnt <= 0)
+               return 0;
 
+       hdr = buf_msg(skb_peek(list));
        if (unlikely(msg_size(hdr) > mtu)) {
                pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
                        skb_queue_len(list), msg_user(hdr),
@@ -1053,6 +1057,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                return -EMSGSIZE;
        }
 
+       imp = msg_importance(hdr);
        /* Allow oversubscription of one data msg per source at congestion */
        if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
                if (imp == TIPC_SYSTEM_IMPORTANCE) {
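
The tipc_link_xmit() reshuffle is a NULL-dereference fix: skb_peek() returns NULL on an empty list, so buf_msg() on its result must wait until the queue length check has passed. A stripped-down model of the hazard and the fixed ordering:

#include <stdio.h>

struct pkt { int importance; };

/* peek() models skb_peek(): NULL when the queue is empty. */
static struct pkt *peek(struct pkt **q, int len)
{
        return len > 0 ? q[0] : NULL;
}

static int link_xmit(struct pkt **q, int len)
{
        if (len <= 0)                 /* bail out first, as the fix does */
                return 0;

        struct pkt *hdr = peek(q, len);

        return hdr->importance;       /* safe: hdr cannot be NULL here */
}

int main(void)
{
        printf("%d\n", link_xmit(NULL, 0));   /* empty list: returns 0 */
        return 0;
}
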
@@ -2539,7 +2544,7 @@ void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win)
 }
 
 /**
- * link_reset_stats - reset link statistics
+ * tipc_link_reset_stats - reset link statistics
  * @l: pointer to link
  */
 void tipc_link_reset_stats(struct tipc_link *l)
index 83d9eb8305928d9bb1f8523a503b13c77546c2c1..008670d1f43e1c2e9153a706cc18cc8e8ba62a6c 100644 (file)
@@ -1665,7 +1665,7 @@ static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
 }
 
 /**
- * tipc_node_xmit() is the general link level function for message sending
+ * tipc_node_xmit() - general link level function for message sending
  * @net: the applicable net namespace
  * @list: chain of buffers containing message
  * @dnode: address of destination node
index 27026f587fa618c0e2f7e148700807aa120224f1..f620acd2a0f5e2cda436358d2b6c8eebad8e9d4b 100644 (file)
@@ -21,6 +21,7 @@ config CFG80211
        tristate "cfg80211 - wireless configuration API"
        depends on RFKILL || !RFKILL
        select FW_LOADER
+       select CRC32
        # may need to update this when certificates are changed and are
        # using a different algorithm, though right now they shouldn't
        # (this is here rather than below to allow it to be a module)
index bb72447ad960271ab1aaec43867202763f8bc6f6..8114bba8556c72a1ed173cb7c29232ed9dbdf7a0 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright      2017  Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2021 Intel Corporation
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -139,6 +139,11 @@ static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
        return rcu_dereference_rtnl(cfg80211_regdomain);
 }
 
+/*
+ * Returns the regulatory domain associated with the wiphy.
+ *
+ * Requires either RTNL or RCU protection
+ */
 const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy)
 {
        return rcu_dereference_rtnl(wiphy->regd);
@@ -2571,9 +2576,13 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
        if (IS_ERR(new_regd))
                return;
 
+       rtnl_lock();
+
        tmp = get_wiphy_regdom(wiphy);
        rcu_assign_pointer(wiphy->regd, new_regd);
        rcu_free_regdom(tmp);
+
+       rtnl_unlock();
 }
 EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
 
index ac4a317038f1bb90f4c515e813a3c371b301eaaa..4a83117507f5a867412e350b27577a7832e16c22 100644 (file)
@@ -108,9 +108,9 @@ EXPORT_SYMBOL(xsk_get_pool_from_qid);
 
 void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
 {
-       if (queue_id < dev->real_num_rx_queues)
+       if (queue_id < dev->num_rx_queues)
                dev->_rx[queue_id].pool = NULL;
-       if (queue_id < dev->real_num_tx_queues)
+       if (queue_id < dev->num_tx_queues)
                dev->_tx[queue_id].pool = NULL;
 }
 
@@ -423,9 +423,9 @@ static void xsk_destruct_skb(struct sk_buff *skb)
        struct xdp_sock *xs = xdp_sk(skb->sk);
        unsigned long flags;
 
-       spin_lock_irqsave(&xs->tx_completion_lock, flags);
+       spin_lock_irqsave(&xs->pool->cq_lock, flags);
        xskq_prod_submit_addr(xs->pool->cq, addr);
-       spin_unlock_irqrestore(&xs->tx_completion_lock, flags);
+       spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
 
        sock_wfree(skb);
 }
@@ -437,6 +437,7 @@ static int xsk_generic_xmit(struct sock *sk)
        bool sent_frame = false;
        struct xdp_desc desc;
        struct sk_buff *skb;
+       unsigned long flags;
        int err = 0;
 
        mutex_lock(&xs->mutex);
@@ -468,10 +469,13 @@ static int xsk_generic_xmit(struct sock *sk)
                 * if there is space in it. This avoids having to implement
                 * any buffering in the Tx path.
                 */
+               spin_lock_irqsave(&xs->pool->cq_lock, flags);
                if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
+                       spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
                        kfree_skb(skb);
                        goto out;
                }
+               spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
 
                skb->dev = xs->dev;
                skb->priority = sk->sk_priority;
@@ -483,6 +487,9 @@ static int xsk_generic_xmit(struct sock *sk)
                if  (err == NETDEV_TX_BUSY) {
                        /* Tell user-space to retry the send */
                        skb->destructor = sock_wfree;
+                       spin_lock_irqsave(&xs->pool->cq_lock, flags);
+                       xskq_prod_cancel(xs->pool->cq);
+                       spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
                        /* Free skb without triggering the perf drop trace */
                        consume_skb(skb);
                        err = -EAGAIN;
@@ -878,6 +885,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
                }
        }
 
+       /* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
+       xs->fq_tmp = NULL;
+       xs->cq_tmp = NULL;
+
        xs->dev = dev;
        xs->zc = xs->umem->zc;
        xs->queue_id = qid;
@@ -1299,7 +1310,6 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
        xs->state = XSK_READY;
        mutex_init(&xs->mutex);
        spin_lock_init(&xs->rx_lock);
-       spin_lock_init(&xs->tx_completion_lock);
 
        INIT_LIST_HEAD(&xs->map_list);
        spin_lock_init(&xs->map_list_lock);
index 67a4494d63b681b80b2bb92428925f2edf5972ee..20598eea658c472fbea46f8365f7ca369c4b435a 100644 (file)
@@ -71,12 +71,11 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->xsk_tx_list);
        spin_lock_init(&pool->xsk_tx_list_lock);
+       spin_lock_init(&pool->cq_lock);
        refcount_set(&pool->users, 1);
 
        pool->fq = xs->fq_tmp;
        pool->cq = xs->cq_tmp;
-       xs->fq_tmp = NULL;
-       xs->cq_tmp = NULL;
 
        for (i = 0; i < pool->free_heads_cnt; i++) {
                xskb = &pool->heads[i];
index 4a9663aa7afe6dc6c5e9ea3223263a42951ba9b3..2823b7c3302d0a24db4ce1f0720ca8094693bde7 100644 (file)
@@ -334,6 +334,11 @@ static inline bool xskq_prod_is_full(struct xsk_queue *q)
        return xskq_prod_nb_free(q, 1) ? false : true;
 }
 
+static inline void xskq_prod_cancel(struct xsk_queue *q)
+{
+       q->cached_prod--;
+}
+
 static inline int xskq_prod_reserve(struct xsk_queue *q)
 {
        if (xskq_prod_is_full(q))
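
xskq_prod_cancel() is the undo for xskq_prod_reserve(): reserve bumps the cached producer index to hold a completion-ring slot before the skb reaches the driver, and a NETDEV_TX_BUSY return steps it back, all under the new pool->cq_lock. A loose single-threaded model of that reserve/cancel/submit lifecycle (the full-ring test is simplified; the real queue checks against a cached consumer index):

#include <stdio.h>

struct ring { unsigned int cached_prod, prod, size; };

static int prod_reserve(struct ring *r)
{
        if (r->cached_prod - r->prod >= r->size)
                return -1;                /* no free slot */
        r->cached_prod++;                 /* hold one slot */
        return 0;
}

static void prod_cancel(struct ring *r)
{
        r->cached_prod--;                 /* give the held slot back */
}

static void prod_submit(struct ring *r)
{
        r->prod = r->cached_prod;         /* publish held slots */
}

int main(void)
{
        struct ring cq = { 0, 0, 4 };

        prod_reserve(&cq);                /* before the driver sees the skb */
        prod_cancel(&cq);                 /* NETDEV_TX_BUSY: undo the hold */
        prod_reserve(&cq);
        prod_submit(&cq);                 /* send succeeded: completion visible */
        printf("prod=%u cached_prod=%u\n", cq.prod, cq.cached_prod);
        return 0;
}
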
index 00085308ed9da97a53467b0a8bbfc2613d3ccc2c..92e888ed939f9833a3b17f32ae7d15813ec4c46f 100755 (executable)
@@ -6646,6 +6646,12 @@ sub process {
 #                      }
 #              }
 
+# strlcpy uses that should likely be strscpy
+               if ($line =~ /\bstrlcpy\s*\(/) {
+                       WARN("STRLCPY",
+                            "Prefer strscpy over strlcpy - see: https://lore.kernel.org/r/CAHk-=wgfRnXz0W3D37d01q3JFkr_i_uTL=V6A6G1oUZcprmknw\@mail.gmail.com/\n" . $herecurr);
+               }
+
 # typecasts on min/max could be min_t/max_t
                if ($perl_version_ok &&
                    defined $stat &&
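
The new checkpatch rule nudges code from strlcpy() to strscpy(). The behavioural difference: strlcpy() returns strlen(src), so it always walks the whole (possibly unterminated) source even when almost nothing is copied, while strscpy() returns the number of bytes copied or -E2BIG on truncation and stops reading at the destination bound. A user-space model of just the two return conventions (not the kernel implementations):

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Models of the two kernel APIs: semantics only, not the real code. */
static size_t my_strlcpy(char *d, const char *s, size_t n)
{
        size_t src_len = strlen(s);          /* always walks all of s */

        if (n) {
                size_t copy = src_len >= n ? n - 1 : src_len;

                memcpy(d, s, copy);
                d[copy] = '\0';
        }
        return src_len;                      /* length it *wanted* to copy */
}

static long my_strscpy(char *d, const char *s, size_t n)
{
        size_t i;

        for (i = 0; i < n; i++) {
                d[i] = s[i];
                if (!s[i])
                        return i;            /* bytes copied, NUL excluded */
        }
        if (n)
                d[n - 1] = '\0';
        return -E2BIG;                       /* truncation is an error */
}

int main(void)
{
        char buf[8];

        printf("strlcpy: %zu\n", my_strlcpy(buf, "truncated string", sizeof(buf)));
        printf("strscpy: %ld\n", my_strscpy(buf, "truncated string", sizeof(buf)));
        return 0;
}
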
index 8c8d7c3d7accc77b31ea6e5f0e4749c35a2524d3..ff88e2faefd35c2d763a1d485eef39140efaa300 100755 (executable)
@@ -223,6 +223,7 @@ while [ "$1" != "" ] ; do
                ;;
 
        *)
+               echo "bad command: $CMD" >&2
                usage
                ;;
        esac
index e083bcae343f3e71290e433b7fd50861fd819ca4..3643b4f896eded47c9d8517ffc9585438bd62398 100755 (executable)
@@ -15,6 +15,8 @@ if ! test -r System.map ; then
        exit 0
 fi
 
+# legacy behavior: "depmod" in /sbin, no /sbin in PATH
+PATH="$PATH:/sbin"
 if [ -z $(command -v $DEPMOD) ]; then
        echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
        echo "This is probably in the kmod package." >&2
index d66949bfeba45cbfa285264e4a842acd7ab06907..b5487cce69e8e8c09c60e6889feb2780f9b3adfa 100644 (file)
@@ -22,9 +22,9 @@ always-y += $(GCC_PLUGIN)
 GCC_PLUGINS_DIR = $(shell $(CC) -print-file-name=plugin)
 
 plugin_cxxflags        = -Wp,-MMD,$(depfile) $(KBUILD_HOSTCXXFLAGS) -fPIC \
-                  -I $(GCC_PLUGINS_DIR)/include -I $(obj) -std=gnu++98 \
+                  -I $(GCC_PLUGINS_DIR)/include -I $(obj) -std=gnu++11 \
                   -fno-rtti -fno-exceptions -fasynchronous-unwind-tables \
-                  -ggdb -Wno-narrowing -Wno-unused-variable -Wno-c++11-compat \
+                  -ggdb -Wno-narrowing -Wno-unused-variable \
                   -Wno-format-diag
 
 plugin_ldflags = -shared
index e46df0a2d4f9d200bee7ccaf3528431eca45206a..2c40e68853dde8adae1c921912172bb357c2ba48 100644 (file)
@@ -94,16 +94,6 @@ configfiles=$(wildcard $(srctree)/kernel/configs/$@ $(srctree)/arch/$(SRCARCH)/c
        $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh -m .config $(configfiles)
        $(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
 
-PHONY += kvmconfig
-kvmconfig: kvm_guest.config
-       @echo >&2 "WARNING: 'make $@' will be removed after Linux 5.10"
-       @echo >&2 "         Please use 'make $<' instead."
-
-PHONY += xenconfig
-xenconfig: xen.config
-       @echo >&2 "WARNING: 'make $@' will be removed after Linux 5.10"
-       @echo >&2 "         Please use 'make $<' instead."
-
 PHONY += tinyconfig
 tinyconfig:
        $(Q)$(MAKE) -f $(srctree)/Makefile allnoconfig tiny.config
index aa68ec95620d6095123c538e25193d627ba6107c..fcd4acd4e9cbc10002fd72c91b0ce3677d11f01e 100755 (executable)
@@ -33,7 +33,9 @@ if [ -f /usr/include/ncurses/ncurses.h ]; then
        exit 0
 fi
 
-if [ -f /usr/include/ncurses.h ]; then
+# As a final fallback before giving up, check if $HOSTCC knows of a default
+# ncurses installation (e.g. from a vendor-specific sysroot).
+if echo '#include <ncurses.h>' | "${HOSTCC}" -E - >/dev/null 2>&1; then
        echo cflags=\"-D_GNU_SOURCE\"
        echo libs=\"-lncurses\"
        exit 0
index 7d8026f3f377256292b5b7bd133d865b37f4b5a3..a0cd28cd31a852c50ca7f2500f64d91f6b60f53b 100644 (file)
@@ -275,7 +275,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
                struct inode *inode;
 
                audit_log_format(ab, " name=");
+               spin_lock(&a->u.dentry->d_lock);
                audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
+               spin_unlock(&a->u.dentry->d_lock);
 
                inode = d_backing_inode(a->u.dentry);
                if (inode) {
@@ -293,8 +295,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
                dentry = d_find_alias(inode);
                if (dentry) {
                        audit_log_format(ab, " name=");
-                       audit_log_untrustedstring(ab,
-                                        dentry->d_name.name);
+                       spin_lock(&dentry->d_lock);
+                       audit_log_untrustedstring(ab, dentry->d_name.name);
+                       spin_unlock(&dentry->d_lock);
                        dput(dentry);
                }
                audit_log_format(ab, " dev=");
index 7f82762ccc8c80ba791894c4add0a66ca718947e..ee7122c461d46f442cfdea976c337b97ac640bba 100644 (file)
@@ -88,7 +88,7 @@ static void transmit_midi_msg(struct snd_ff *ff, unsigned int port)
 
        /* Set interval to next transaction. */
        ff->next_ktime[port] = ktime_add_ns(ktime_get(),
-                               ff->rx_bytes[port] * 8 * NSEC_PER_SEC / 31250);
+                       ff->rx_bytes[port] * 8 * (NSEC_PER_SEC / 31250));
 
        if (quad_count == 1)
                tcode = TCODE_WRITE_QUADLET_REQUEST;
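
Both FireWire hunks reorder bytes * 8 * NSEC_PER_SEC / 31250 into bytes * 8 * (NSEC_PER_SEC / 31250). Since 31250 divides NSEC_PER_SEC exactly (one bit at 31.25 kbit/s lasts 32000 ns), the value is unchanged, but the intermediate product no longer overflows on builds where NSEC_PER_SEC's long type is 32 bits wide. A quick check of the arithmetic:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
        long bytes = 3;                       /* a 3-byte MIDI message */
        long per_bit = NSEC_PER_SEC / 31250;  /* 32000 ns per bit */

        /* 24 * 32000 = 768000: small, overflow-safe in any long.
         * The old order first computed 24 * 10^9, which does not
         * fit when long is 32 bits wide. */
        printf("interval: %ld ns\n", bytes * 8 * per_bit);
        return 0;
}
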
index 90288b4b46379526527d953ee7a9ca9f2fe9eb3e..a073cece4a7d5e3aa2a45c1d5d148eb940cf67ae 100644 (file)
@@ -209,7 +209,7 @@ static void midi_port_work(struct work_struct *work)
 
        /* Set interval to next transaction. */
        port->next_ktime = ktime_add_ns(ktime_get(),
-                               port->consume_bytes * 8 * NSEC_PER_SEC / 31250);
+                       port->consume_bytes * 8 * (NSEC_PER_SEC / 31250));
 
        /* Start this transaction. */
        port->idling = false;
index 6852668f1bcb47633bb366460f2b87c0fd910cb5..e4dd2ff5e47378905f3cae5d53a5770c76740dfd 100644 (file)
@@ -2220,8 +2220,6 @@ static const struct snd_pci_quirk power_save_denylist[] = {
        SND_PCI_QUIRK(0x1849, 0x7662, "Asrock H81M-HDS", 0),
        /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
        SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
-       /* https://bugzilla.redhat.com/show_bug.cgi?id=1581607 */
-       SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", 0),
        /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
        SND_PCI_QUIRK(0x1558, 0x6504, "Clevo W65_67SB", 0),
        /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
@@ -2600,7 +2598,8 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
        /* ATI HDMI */
        { PCI_DEVICE(0x1002, 0x0002),
-         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+         AZX_DCAPS_PM_RUNTIME },
        { PCI_DEVICE(0x1002, 0x1308),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0x157a),
@@ -2662,9 +2661,11 @@ static const struct pci_device_id azx_ids[] = {
        { PCI_DEVICE(0x1002, 0xaab0),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0xaac0),
-         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+         AZX_DCAPS_PM_RUNTIME },
        { PCI_DEVICE(0x1002, 0xaac8),
-         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+         AZX_DCAPS_PM_RUNTIME },
        { PCI_DEVICE(0x1002, 0xaad8),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
          AZX_DCAPS_PM_RUNTIME },
index 70164d1428d404661120da07b4f1baf3b0185df5..361cf2041911ad754970b9ee0f3a48cfedbd97e9 100644 (file)
@@ -388,7 +388,7 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev)
         * in powers of 2, next available ratio is 16 which can be
         * used as a limiting factor here.
         */
-       if (of_device_is_compatible(np, "nvidia,tegra194-hda"))
+       if (of_device_is_compatible(np, "nvidia,tegra30-hda"))
                chip->bus.core.sdo_limit = 16;
 
        /* codec detection */
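
The compatible swap above widens the SDO limit to every Tegra HDA instance:
device trees for the newer SoCs conventionally list the oldest supported
compatible as a fallback, so matching on the base string catches them all. A
hedged illustration, assuming the usual fallback chain in the DT:

    /*
     * Assumed DT fallback chain (not part of this patch):
     *   compatible = "nvidia,tegra194-hda", "nvidia,tegra30-hda";
     * of_device_is_compatible() matches any entry in the list.
     */
    if (of_device_is_compatible(np, "nvidia,tegra30-hda"))
            chip->bus.core.sdo_limit = 16;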
index be5000dd158532bbecf342fc7f823153e62cd8b7..d49cc4409d59c091d643f9082116c1aa583eb252 100644 (file)
@@ -1070,6 +1070,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
 static const struct hda_device_id snd_hda_id_conexant[] = {
        HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
+       HDA_CODEC_ENTRY(0x14f120d0, "CX11970", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto),
index 1e4a4b83fbf6fe246403ae4de7ae84710accea72..74d246a0dc6de2cedfa8f24ccb9d8378d7e3bf84 100644 (file)
@@ -1733,7 +1733,7 @@ static void silent_stream_disable(struct hda_codec *codec,
        per_pin->silent_stream = false;
 
  unlock_out:
-       mutex_unlock(&spec->pcm_lock);
+       mutex_unlock(&per_pin->lock);
 }
 
 /* update ELD and jack state via audio component */
index dde5ba20954157e7011c32c048eb0e3a658b3f94..dd82ff2bd5d65146a5ff3f06fc5cc8cf51b09b42 100644 (file)
@@ -6289,6 +6289,7 @@ enum {
        ALC221_FIXUP_HP_FRONT_MIC,
        ALC292_FIXUP_TPT460,
        ALC298_FIXUP_SPK_VOLUME,
+       ALC298_FIXUP_LENOVO_SPK_VOLUME,
        ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER,
        ALC269_FIXUP_ATIV_BOOK_8,
        ALC221_FIXUP_HP_MIC_NO_PRESENCE,
@@ -7119,6 +7120,10 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
        },
+       [ALC298_FIXUP_LENOVO_SPK_VOLUME] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc298_fixup_speaker_volume,
+       },
        [ALC295_FIXUP_DISABLE_DAC3] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc295_fixup_disable_dac3,
@@ -7885,7 +7890,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
-       SND_PCI_QUIRK(0x1028, 0x0a58, "Dell Precision 3650 Tower", ALC255_FIXUP_DELL_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -7959,11 +7964,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x8780, "HP ZBook Fury 17 G7 Mobile Workstation",
+                     ALC285_FIXUP_HP_GPIO_AMP_INIT),
+       SND_PCI_QUIRK(0x103c, 0x8783, "HP ZBook Fury 15 G7 Mobile Workstation",
+                     ALC285_FIXUP_HP_GPIO_AMP_INIT),
+       SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
@@ -8021,6 +8032,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
        SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
        SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+       SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+       SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
        SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
        SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
@@ -8126,6 +8139,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
index 7ef8f3105cdb7fc98878276cc79affdda4f68798..0ab40a8a68fb50d3033cfc8306e44a18803e7841 100644 (file)
@@ -1002,6 +1002,7 @@ static const struct hda_verb vt1802_init_verbs[] = {
 enum {
        VIA_FIXUP_INTMIC_BOOST,
        VIA_FIXUP_ASUS_G75,
+       VIA_FIXUP_POWER_SAVE,
 };
 
 static void via_fixup_intmic_boost(struct hda_codec *codec,
@@ -1011,6 +1012,13 @@ static void via_fixup_intmic_boost(struct hda_codec *codec,
                override_mic_boost(codec, 0x30, 0, 2, 40);
 }
 
+static void via_fixup_power_save(struct hda_codec *codec,
+                                const struct hda_fixup *fix, int action)
+{
+       if (action == HDA_FIXUP_ACT_PRE_PROBE)
+               codec->power_save_node = 0;
+}
+
 static const struct hda_fixup via_fixups[] = {
        [VIA_FIXUP_INTMIC_BOOST] = {
                .type = HDA_FIXUP_FUNC,
@@ -1025,11 +1033,16 @@ static const struct hda_fixup via_fixups[] = {
                        { }
                }
        },
+       [VIA_FIXUP_POWER_SAVE] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = via_fixup_power_save,
+       },
 };
 
 static const struct snd_pci_quirk vt2002p_fixups[] = {
        SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
        SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
+       SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", VIA_FIXUP_POWER_SAVE),
        {}
 };
 
index 8c138e490f0c54fb20bb6800447eae53640c2802..d3536fd6a12400644a4cfa5b26795956fd2cedb3 100644 (file)
@@ -140,21 +140,14 @@ static int snd_acp3x_probe(struct pci_dev *pci,
                goto release_regions;
        }
 
-       /* check for msi interrupt support */
-       ret = pci_enable_msi(pci);
-       if (ret)
-               /* msi is not enabled */
-               irqflags = IRQF_SHARED;
-       else
-               /* msi is enabled */
-               irqflags = 0;
+       irqflags = IRQF_SHARED;
 
        addr = pci_resource_start(pci, 0);
        adata->acp3x_base = devm_ioremap(&pci->dev, addr,
                                        pci_resource_len(pci, 0));
        if (!adata->acp3x_base) {
                ret = -ENOMEM;
-               goto disable_msi;
+               goto release_regions;
        }
        pci_set_master(pci);
        pci_set_drvdata(pci, adata);
@@ -162,7 +155,7 @@ static int snd_acp3x_probe(struct pci_dev *pci,
        adata->pme_en = rv_readl(adata->acp3x_base + mmACP_PME_EN);
        ret = acp3x_init(adata);
        if (ret)
-               goto disable_msi;
+               goto release_regions;
 
        val = rv_readl(adata->acp3x_base + mmACP_I2S_PIN_CONFIG);
        switch (val) {
@@ -251,8 +244,6 @@ unregister_devs:
 de_init:
        if (acp3x_deinit(adata->acp3x_base))
                dev_err(&pci->dev, "ACP de-init failed\n");
-disable_msi:
-       pci_disable_msi(pci);
 release_regions:
        pci_release_regions(pci);
 disable_pci:
@@ -311,7 +302,6 @@ static void snd_acp3x_remove(struct pci_dev *pci)
                dev_err(&pci->dev, "ACP de-init failed\n");
        pm_runtime_forbid(&pci->dev);
        pm_runtime_get_noresume(&pci->dev);
-       pci_disable_msi(pci);
        pci_release_regions(pci);
        pci_disable_device(pci);
 }
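
With MSI gone, the probe error unwinding also simplifies: there is no longer
a disable_msi step between an ioremap failure and releasing the BAR regions.
A hedged sketch of the reverse-order unwind rule the goto labels implement
(function and resource names are illustrative):

    static int probe_sketch(struct pci_dev *pci)
    {
            void __iomem *base;
            int ret;

            ret = pci_enable_device(pci);
            if (ret)
                    return ret;
            ret = pci_request_regions(pci, "acp3x-sketch");
            if (ret)
                    goto disable_pci;
            base = pci_iomap(pci, 0, 0);
            if (!base) {
                    ret = -ENOMEM;
                    goto release_regions;   /* undo only what succeeded */
            }
            return 0;

    release_regions:
            pci_release_regions(pci);
    disable_pci:
            pci_disable_device(pci);
            return ret;
    }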
index fa169bf09886fd046fef231230975e1aad11f461..deca8c7a0e8784b82458bc9b5d9d0d12adbd58e7 100644 (file)
@@ -171,6 +171,20 @@ static const struct dmi_system_id rn_acp_quirk_table[] = {
                        DMI_EXACT_MATCH(DMI_BOARD_NAME, "LNVNB161216"),
                }
        },
+       {
+               /* Lenovo ThinkPad E14 Gen 2 */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_EXACT_MATCH(DMI_BOARD_NAME, "20T6CTO1WW"),
+               }
+       },
+       {
+               /* Lenovo ThinkPad X395 */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_EXACT_MATCH(DMI_BOARD_NAME, "20NLCTO1WW"),
+               }
+       },
        {}
 };
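
A quirk table like the one above is only half the mechanism; the driver
consults it at probe time. A hedged usage sketch: dmi_check_system() is the
real helper and returns the number of matching entries, while the wrapper
name below is illustrative:

    #include <linux/dmi.h>

    static bool rn_acp_has_quirk(void)
    {
            return dmi_check_system(rn_acp_quirk_table) > 0;
    }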
 
index 142373ec411adb31ed33f5a7cf0a321c72c91de8..9fe9471f4514dc61ebf97678930fe48b7b184372 100644 (file)
@@ -143,7 +143,7 @@ config SND_MCHP_SOC_SPDIFTX
          - sama7g5
 
          This S/PDIF TX driver is compliant with IEC-60958 standard and
-         includes programable User Data and Channel Status fields.
+         includes programmable User Data and Channel Status fields.
 
 config SND_MCHP_SOC_SPDIFRX
        tristate "Microchip ASoC driver for boards using S/PDIF RX"
@@ -157,5 +157,5 @@ config SND_MCHP_SOC_SPDIFRX
          - sama7g5
 
          This S/PDIF RX driver is compliant with IEC-60958 standard and
-         includes programable User Data and Channel Status fields.
+         includes programmable User Data and Channel Status fields.
 endif
index ba4eb54aafcb9f4e181d9692fe331863537b6bc3..9bf6bfdaf11e4e6fc8f30403700855a9f38f46bd 100644 (file)
@@ -457,7 +457,7 @@ config SND_SOC_ADAU7118_HW
        help
          Enable support for the Analog Devices ADAU7118 8 Channel PDM-to-I2S/TDM
          Converter. In this mode, the device works in standalone mode which
-         means that there is no bus to comunicate with it. Stereo mode is not
+         means that there is no bus to communicate with it. Stereo mode is not
          supported in this mode.
 
          To compile this driver as a module, choose M here: the module
index 92921e34f9486708242b60d6dfd02828c3c8f834..85f6865019d4a82d9ac924294c32452c86f8d028 100644 (file)
 #include <sound/tlv.h>
 #include "max98373.h"
 
+static const u32 max98373_i2c_cache_reg[] = {
+       MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK,
+       MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK,
+       MAX98373_R20B6_BDE_CUR_STATE_READBACK,
+};
+
 static struct reg_default max98373_reg[] = {
        {MAX98373_R2000_SW_RESET, 0x00},
        {MAX98373_R2001_INT_RAW1, 0x00},
@@ -472,6 +478,11 @@ static struct snd_soc_dai_driver max98373_dai[] = {
 static int max98373_suspend(struct device *dev)
 {
        struct max98373_priv *max98373 = dev_get_drvdata(dev);
+       int i;
+
+       /* cache feedback register values before suspend */
+       for (i = 0; i < max98373->cache_num; i++)
+               regmap_read(max98373->regmap, max98373->cache[i].reg, &max98373->cache[i].val);
 
        regcache_cache_only(max98373->regmap, true);
        regcache_mark_dirty(max98373->regmap);
@@ -509,6 +520,7 @@ static int max98373_i2c_probe(struct i2c_client *i2c,
 {
        int ret = 0;
        int reg = 0;
+       int i;
        struct max98373_priv *max98373 = NULL;
 
        max98373 = devm_kzalloc(&i2c->dev, sizeof(*max98373), GFP_KERNEL);
@@ -534,6 +546,14 @@ static int max98373_i2c_probe(struct i2c_client *i2c,
                return ret;
        }
 
+       max98373->cache_num = ARRAY_SIZE(max98373_i2c_cache_reg);
+       max98373->cache = devm_kcalloc(&i2c->dev, max98373->cache_num,
+                                      sizeof(*max98373->cache),
+                                      GFP_KERNEL);
+
+       for (i = 0; i < max98373->cache_num; i++)
+               max98373->cache[i].reg = max98373_i2c_cache_reg[i];
+
        /* voltage/current slot & gpio configuration */
        max98373_slot_config(&i2c->dev, max98373);
 
index ec2e79c57357729835f6165b9eeb999832656135..b8d471d79e939e885553691565cbf036d3afaa4c 100644 (file)
@@ -23,6 +23,12 @@ struct sdw_stream_data {
        struct sdw_stream_runtime *sdw_stream;
 };
 
+static const u32 max98373_sdw_cache_reg[] = {
+       MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK,
+       MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK,
+       MAX98373_R20B6_BDE_CUR_STATE_READBACK,
+};
+
 static struct reg_default max98373_reg[] = {
        {MAX98373_R0040_SCP_INIT_STAT_1, 0x00},
        {MAX98373_R0041_SCP_INIT_MASK_1, 0x00},
@@ -245,6 +251,11 @@ static const struct regmap_config max98373_sdw_regmap = {
 static __maybe_unused int max98373_suspend(struct device *dev)
 {
        struct max98373_priv *max98373 = dev_get_drvdata(dev);
+       int i;
+
+       /* cache feedback register values before suspend */
+       for (i = 0; i < max98373->cache_num; i++)
+               regmap_read(max98373->regmap, max98373->cache[i].reg, &max98373->cache[i].val);
 
        regcache_cache_only(max98373->regmap, true);
 
@@ -757,6 +768,7 @@ static int max98373_init(struct sdw_slave *slave, struct regmap *regmap)
 {
        struct max98373_priv *max98373;
        int ret;
+       int i;
        struct device *dev = &slave->dev;
 
        /*  Allocate and assign private driver data structure  */
@@ -768,6 +780,14 @@ static int max98373_init(struct sdw_slave *slave, struct regmap *regmap)
        max98373->regmap = regmap;
        max98373->slave = slave;
 
+       max98373->cache_num = ARRAY_SIZE(max98373_sdw_cache_reg);
+       max98373->cache = devm_kcalloc(dev, max98373->cache_num,
+                                      sizeof(*max98373->cache),
+                                      GFP_KERNEL);
+
+       for (i = 0; i < max98373->cache_num; i++)
+               max98373->cache[i].reg = max98373_sdw_cache_reg[i];
+
        /* Read voltage and slot configuration */
        max98373_slot_config(dev, max98373);
 
index 929bb1798c43f9a5c03af199052a9682aae8ae34..31d571d4fac1ce590e51386759ede696ab7ddba6 100644 (file)
@@ -168,6 +168,31 @@ static SOC_ENUM_SINGLE_DECL(max98373_adc_samplerate_enum,
                            MAX98373_R2051_MEAS_ADC_SAMPLING_RATE, 0,
                            max98373_ADC_samplerate_text);
 
+static int max98373_feedback_get(struct snd_kcontrol *kcontrol,
+                                struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+       struct soc_mixer_control *mc =
+               (struct soc_mixer_control *)kcontrol->private_value;
+       struct max98373_priv *max98373 = snd_soc_component_get_drvdata(component);
+       int i;
+
+       if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) {
+               /*
+                * Register values will be cached before suspend. The cached
+                * value is still valid, so userspace will be happy with it.
+                */
+               for (i = 0; i < max98373->cache_num; i++) {
+                       if (mc->reg == max98373->cache[i].reg) {
+                               ucontrol->value.integer.value[0] = max98373->cache[i].val;
+                               return 0;
+                       }
+               }
+       }
+
+       return snd_soc_put_volsw(kcontrol, ucontrol);
+}
+
 static const struct snd_kcontrol_new max98373_snd_controls[] = {
 SOC_SINGLE("Digital Vol Sel Switch", MAX98373_R203F_AMP_DSP_CFG,
        MAX98373_AMP_VOL_SEL_SHIFT, 1, 0),
@@ -209,8 +234,10 @@ SOC_SINGLE("ADC PVDD FLT Switch", MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG,
        MAX98373_FLT_EN_SHIFT, 1, 0),
 SOC_SINGLE("ADC TEMP FLT Switch", MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG,
        MAX98373_FLT_EN_SHIFT, 1, 0),
-SOC_SINGLE("ADC PVDD", MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK, 0, 0xFF, 0),
-SOC_SINGLE("ADC TEMP", MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK, 0, 0xFF, 0),
+SOC_SINGLE_EXT("ADC PVDD", MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK, 0, 0xFF, 0,
+       max98373_feedback_get, NULL),
+SOC_SINGLE_EXT("ADC TEMP", MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK, 0, 0xFF, 0,
+       max98373_feedback_get, NULL),
 SOC_SINGLE("ADC PVDD FLT Coeff", MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG,
        0, 0x3, 0),
 SOC_SINGLE("ADC TEMP FLT Coeff", MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG,
@@ -226,7 +253,8 @@ SOC_SINGLE("BDE LVL1 Thresh", MAX98373_R2097_BDE_L1_THRESH, 0, 0xFF, 0),
 SOC_SINGLE("BDE LVL2 Thresh", MAX98373_R2098_BDE_L2_THRESH, 0, 0xFF, 0),
 SOC_SINGLE("BDE LVL3 Thresh", MAX98373_R2099_BDE_L3_THRESH, 0, 0xFF, 0),
 SOC_SINGLE("BDE LVL4 Thresh", MAX98373_R209A_BDE_L4_THRESH, 0, 0xFF, 0),
-SOC_SINGLE("BDE Active Level", MAX98373_R20B6_BDE_CUR_STATE_READBACK, 0, 8, 0),
+SOC_SINGLE_EXT("BDE Active Level", MAX98373_R20B6_BDE_CUR_STATE_READBACK, 0, 8, 0,
+       max98373_feedback_get, NULL),
 SOC_SINGLE("BDE Clip Mode Switch", MAX98373_R2092_BDE_CLIPPER_MODE, 0, 1, 0),
 SOC_SINGLE("BDE Thresh Hysteresis", MAX98373_R209B_BDE_THRESH_HYST, 0, 0xFF, 0),
 SOC_SINGLE("BDE Hold Time", MAX98373_R2090_BDE_LVL_HOLD, 0, 0xFF, 0),
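
Taken together, the max98373 hunks implement one scheme: the readback
registers are volatile, so once the regmap goes cache-only over suspend they
can no longer be read, and the driver snapshots them just beforehand so the
kcontrol getter can serve the snapshot while the bias level is OFF. A
condensed, hedged sketch of the suspend side (the function name is
illustrative):

    static int max98373_snapshot_and_suspend(struct max98373_priv *priv)
    {
            int i;

            /* snapshot volatile readbacks while the bus is still live */
            for (i = 0; i < priv->cache_num; i++)
                    regmap_read(priv->regmap, priv->cache[i].reg,
                                &priv->cache[i].val);

            regcache_cache_only(priv->regmap, true);
            regcache_mark_dirty(priv->regmap);
            return 0;
    }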
index 4ab29b9d51c74edd602d036e13c110b14ceb56f1..71f5a5228f34b1629644723bfb5bd47808bc6900 100644 (file)
 /* MAX98373_R2000_SW_RESET */
 #define MAX98373_SOFT_RESET (0x1 << 0)
 
+struct max98373_cache {
+       u32 reg;
+       u32 val;
+};
+
 struct max98373_priv {
        struct regmap *regmap;
        int reset_gpio;
@@ -212,6 +217,9 @@ struct max98373_priv {
        bool interleave_mode;
        unsigned int ch_size;
        bool tdm_mode;
+       /* cache of valid feedback values to return while suspended */
+       struct max98373_cache *cache;
+       int cache_num;
        /* variables to support soundwire */
        struct sdw_slave *slave;
        bool hw_init;
index 5771c02c34596f5d5cc4f06f243f57f771ef95cc..85f744184a60fe4302baf6e9a58f0190d23ab5a3 100644 (file)
@@ -462,6 +462,8 @@ static int rt711_set_amp_gain_put(struct snd_kcontrol *kcontrol,
        unsigned int read_ll, read_rl;
        int i;
 
+       mutex_lock(&rt711->calibrate_mutex);
+
        /* Can't use update bit function, so read the original value first */
        addr_h = mc->reg;
        addr_l = mc->rreg;
@@ -547,6 +549,8 @@ static int rt711_set_amp_gain_put(struct snd_kcontrol *kcontrol,
        if (dapm->bias_level <= SND_SOC_BIAS_STANDBY)
                regmap_write(rt711->regmap,
                                RT711_SET_AUDIO_POWER_STATE, AC_PWRST_D3);
+
+       mutex_unlock(&rt711->calibrate_mutex);
        return 0;
 }
 
@@ -859,9 +863,11 @@ static int rt711_set_bias_level(struct snd_soc_component *component,
                break;
 
        case SND_SOC_BIAS_STANDBY:
+               mutex_lock(&rt711->calibrate_mutex);
                regmap_write(rt711->regmap,
                        RT711_SET_AUDIO_POWER_STATE,
                        AC_PWRST_D3);
+               mutex_unlock(&rt711->calibrate_mutex);
                break;
 
        default:
index 2c2a76a719401daa2751a1d72391164fe101f205..ede4a9ad1054cee66e49b2f1e85de85048103153 100644 (file)
@@ -164,6 +164,7 @@ static int imx_hdmi_probe(struct platform_device *pdev)
 
        if ((hdmi_out && hdmi_in) || (!hdmi_out && !hdmi_in)) {
                dev_err(&pdev->dev, "Invalid HDMI DAI link\n");
+               ret = -EINVAL;
                goto fail;
        }
 
index c55d1239e705b50e639cb543bb29a383510748dd..c763bfeb1f38fbfe89f3462a6ad866115ecc4d1f 100644 (file)
@@ -189,6 +189,7 @@ static struct platform_driver haswell_audio = {
        .probe = haswell_audio_probe,
        .driver = {
                .name = "haswell-audio",
+               .pm = &snd_soc_pm_ops,
        },
 };
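
Without a .pm hook the platform bus never suspends the sound card at all;
snd_soc_pm_ops is the stock ASoC dev_pm_ops, and wiring it into the driver
struct is the whole fix. A minimal hedged sketch of the shape this takes:

    static struct platform_driver machine_sketch = {   /* illustrative name */
            .probe = haswell_audio_probe,
            .driver = {
                    .name = "haswell-audio",
                    .pm   = &snd_soc_pm_ops,  /* generic ASoC suspend/resume */
            },
    };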
 
index fcd8dff27ae8e837f88ab83e00920b0eded02e31..1275c149acc021986feb2048c12c63d91d75fe89 100644 (file)
@@ -224,6 +224,7 @@ static int cnl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
                                "dsp boot timeout, status=%#x error=%#x\n",
                                sst_dsp_shim_read(ctx, CNL_ADSP_FW_STATUS),
                                sst_dsp_shim_read(ctx, CNL_ADSP_ERROR_CODE));
+                       ret = -ETIMEDOUT;
                        goto err;
                }
        } else {
index c8664ab80d45ad0b0774370221938a9d79f75620..87cac440b36933dc6180c13b9dec766d7b0725e5 100644 (file)
@@ -467,8 +467,20 @@ static int axg_tdm_iface_set_bias_level(struct snd_soc_component *component,
        return ret;
 }
 
+static const struct snd_soc_dapm_widget axg_tdm_iface_dapm_widgets[] = {
+       SND_SOC_DAPM_SIGGEN("Playback Signal"),
+};
+
+static const struct snd_soc_dapm_route axg_tdm_iface_dapm_routes[] = {
+       { "Loopback", NULL, "Playback Signal" },
+};
+
 static const struct snd_soc_component_driver axg_tdm_iface_component_drv = {
-       .set_bias_level = axg_tdm_iface_set_bias_level,
+       .dapm_widgets           = axg_tdm_iface_dapm_widgets,
+       .num_dapm_widgets       = ARRAY_SIZE(axg_tdm_iface_dapm_widgets),
+       .dapm_routes            = axg_tdm_iface_dapm_routes,
+       .num_dapm_routes        = ARRAY_SIZE(axg_tdm_iface_dapm_routes),
+       .set_bias_level         = axg_tdm_iface_set_bias_level,
 };
 
 static const struct of_device_id axg_tdm_iface_of_match[] = {
index 88ed95ae886bb506cda8c8ac74dae2babcd568c0..b4faf9d5c1aad15edb2ab34cf13b87e926c7fc87 100644 (file)
@@ -224,15 +224,6 @@ static const struct axg_tdm_formatter_ops axg_tdmin_ops = {
 };
 
 static const struct axg_tdm_formatter_driver axg_tdmin_drv = {
-       .component_drv  = &axg_tdmin_component_drv,
-       .regmap_cfg     = &axg_tdmin_regmap_cfg,
-       .ops            = &axg_tdmin_ops,
-       .quirks         = &(const struct axg_tdm_formatter_hw) {
-               .skew_offset    = 2,
-       },
-};
-
-static const struct axg_tdm_formatter_driver g12a_tdmin_drv = {
        .component_drv  = &axg_tdmin_component_drv,
        .regmap_cfg     = &axg_tdmin_regmap_cfg,
        .ops            = &axg_tdmin_ops,
@@ -247,10 +238,10 @@ static const struct of_device_id axg_tdmin_of_match[] = {
                .data = &axg_tdmin_drv,
        }, {
                .compatible = "amlogic,g12a-tdmin",
-               .data = &g12a_tdmin_drv,
+               .data = &axg_tdmin_drv,
        }, {
                .compatible = "amlogic,sm1-tdmin",
-               .data = &g12a_tdmin_drv,
+               .data = &axg_tdmin_drv,
        }, {}
 };
 MODULE_DEVICE_TABLE(of, axg_tdmin_of_match);
index af684fd19ab9e7b50f8f3147bb32e66ff1c2c2aa..c5e99c2d89c7ed65b3ae4eacd69a449df5b37d4b 100644 (file)
@@ -270,18 +270,6 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
        struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
        unsigned int id = dai->driver->id;
        int ret = -EINVAL;
-       unsigned int val = 0;
-
-       ret = regmap_read(drvdata->lpaif_map,
-                               LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id), &val);
-       if (ret) {
-               dev_err(dai->dev, "error reading from i2sctl reg: %d\n", ret);
-               return ret;
-       }
-       if (val == LPAIF_I2SCTL_RESET_STATE) {
-               dev_err(dai->dev, "error in i2sctl register state\n");
-               return -ENOTRECOVERABLE;
-       }
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
@@ -454,20 +442,16 @@ static bool lpass_cpu_regmap_volatile(struct device *dev, unsigned int reg)
        struct lpass_variant *v = drvdata->variant;
        int i;
 
-       for (i = 0; i < v->i2s_ports; ++i)
-               if (reg == LPAIF_I2SCTL_REG(v, i))
-                       return true;
        for (i = 0; i < v->irq_ports; ++i)
                if (reg == LPAIF_IRQSTAT_REG(v, i))
                        return true;
 
        for (i = 0; i < v->rdma_channels; ++i)
-               if (reg == LPAIF_RDMACURR_REG(v, i) || reg == LPAIF_RDMACTL_REG(v, i))
+               if (reg == LPAIF_RDMACURR_REG(v, i))
                        return true;
 
        for (i = 0; i < v->wrdma_channels; ++i)
-               if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start) ||
-                       reg == LPAIF_WRDMACTL_REG(v, i + v->wrdma_channel_start))
+               if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start))
                        return true;
 
        return false;
index 80b09dede5f9cbefb4f6a6a7d968ea2166ada871..d1c248590f3ab6da7622dcf8d1909d74c340c88a 100644 (file)
@@ -452,7 +452,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
        unsigned int reg_irqclr = 0, val_irqclr = 0;
        unsigned int  reg_irqen = 0, val_irqen = 0, val_mask = 0;
        unsigned int dai_id = cpu_dai->driver->id;
-       unsigned int dma_ctrl_reg = 0;
 
        ch = pcm_data->dma_ch;
        if (dir ==  SNDRV_PCM_STREAM_PLAYBACK) {
@@ -469,17 +468,7 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                id = pcm_data->dma_ch - v->wrdma_channel_start;
                map = drvdata->lpaif_map;
        }
-       ret = regmap_read(map, LPAIF_DMACTL_REG(v, ch, dir, dai_id), &dma_ctrl_reg);
-       if (ret) {
-               dev_err(soc_runtime->dev, "error reading from rdmactl reg: %d\n", ret);
-               return ret;
-       }
 
-       if (dma_ctrl_reg == LPAIF_DMACTL_RESET_STATE ||
-               dma_ctrl_reg == LPAIF_DMACTL_RESET_STATE + 1) {
-               dev_err(soc_runtime->dev, "error in rdmactl register state\n");
-               return -ENOTRECOVERABLE;
-       }
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
@@ -500,7 +489,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                                        "error writing to rdmactl reg: %d\n", ret);
                                return ret;
                        }
-                       map = drvdata->hdmiif_map;
                        reg_irqclr = LPASS_HDMITX_APP_IRQCLEAR_REG(v);
                        val_irqclr = (LPAIF_IRQ_ALL(ch) |
                                        LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
@@ -519,7 +507,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                        break;
                case MI2S_PRIMARY:
                case MI2S_SECONDARY:
-                       map = drvdata->lpaif_map;
                        reg_irqclr = LPAIF_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
                        val_irqclr = LPAIF_IRQ_ALL(ch);
 
@@ -563,7 +550,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                                        "error writing to rdmactl reg: %d\n", ret);
                                return ret;
                        }
-                       map = drvdata->hdmiif_map;
                        reg_irqen = LPASS_HDMITX_APP_IRQEN_REG(v);
                        val_mask = (LPAIF_IRQ_ALL(ch) |
                                        LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
@@ -573,7 +559,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                        break;
                case MI2S_PRIMARY:
                case MI2S_SECONDARY:
-                       map = drvdata->lpaif_map;
                        reg_irqen = LPAIF_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
                        val_mask = LPAIF_IRQ_ALL(ch);
                        val_irqen = 0;
@@ -838,6 +823,39 @@ static void lpass_platform_pcm_free(struct snd_soc_component *component,
        }
 }
 
+static int lpass_platform_pcmops_suspend(struct snd_soc_component *component)
+{
+       struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+       struct regmap *map;
+       unsigned int dai_id = component->id;
+
+       if (dai_id == LPASS_DP_RX)
+               map = drvdata->hdmiif_map;
+       else
+               map = drvdata->lpaif_map;
+
+       regcache_cache_only(map, true);
+       regcache_mark_dirty(map);
+
+       return 0;
+}
+
+static int lpass_platform_pcmops_resume(struct snd_soc_component *component)
+{
+       struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+       struct regmap *map;
+       unsigned int dai_id = component->id;
+
+       if (dai_id == LPASS_DP_RX)
+               map = drvdata->hdmiif_map;
+       else
+               map = drvdata->lpaif_map;
+
+       regcache_cache_only(map, false);
+       return regcache_sync(map);
+}
+
 static const struct snd_soc_component_driver lpass_component_driver = {
        .name           = DRV_NAME,
        .open           = lpass_platform_pcmops_open,
@@ -850,6 +868,8 @@ static const struct snd_soc_component_driver lpass_component_driver = {
        .mmap           = lpass_platform_pcmops_mmap,
        .pcm_construct  = lpass_platform_pcm_new,
        .pcm_destruct   = lpass_platform_pcm_free,
+       .suspend        = lpass_platform_pcmops_suspend,
+       .resume         = lpass_platform_pcmops_resume,
 
 };
 
index b9aacf3d3b29c9b0de99830a568371edbffffbe8..abdfd9cf91e2a06e2db05193dbe3ab0a31cc2307 100644 (file)
@@ -366,25 +366,27 @@ void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
        struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
        struct device *dev = rsnd_priv_to_dev(priv);
        struct clk *clk;
-       int i, ret;
+       int i;
 
        for_each_rsnd_clk(clk, adg, i) {
-               ret = 0;
                if (enable) {
-                       ret = clk_prepare_enable(clk);
+                       int ret = clk_prepare_enable(clk);
 
                        /*
                         * We shouldn't use clk_get_rate() under
                         * atomic context. Let's keep it when
                         * rsnd_adg_clk_enable() was called
                         */
-                       adg->clk_rate[i] = clk_get_rate(adg->clk[i]);
+                       adg->clk_rate[i] = 0;
+                       if (ret < 0)
+                               dev_warn(dev, "can't use clk %d\n", i);
+                       else
+                               adg->clk_rate[i] = clk_get_rate(clk);
                } else {
-                       clk_disable_unprepare(clk);
+                       if (adg->clk_rate[i])
+                               clk_disable_unprepare(clk);
+                       adg->clk_rate[i] = 0;
                }
-
-               if (ret < 0)
-                       dev_warn(dev, "can't use clk %d\n", i);
        }
 }
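
The rework above fixes an enable/disable imbalance: clk_disable_unprepare()
must only run for clocks whose clk_prepare_enable() actually succeeded, and
the cached rate doubles as the "is enabled" flag (mirroring the patch's
assumption that a usable clock reports a nonzero rate). A hedged sketch of
the invariant:

    static void clk_toggle_sketch(struct device *dev, struct clk *clk,
                                  unsigned long *rate, bool enable)
    {
            if (enable) {
                    *rate = 0;
                    if (clk_prepare_enable(clk) < 0)
                            dev_warn(dev, "can't use clk\n");
                    else
                            *rate = clk_get_rate(clk); /* nonzero => enabled */
            } else if (*rate) {
                    clk_disable_unprepare(clk); /* balanced with the enable */
                    *rate = 0;
            }
    }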
 
index 9f0c86cbdcca2160d4e75c0db226f36b5b86a9aa..2b75d0139e478c6f1feaff88601ecb15933c6221 100644 (file)
@@ -2486,6 +2486,7 @@ void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
        enum snd_soc_dapm_direction dir;
 
        list_del(&w->list);
+       list_del(&w->dirty);
        /*
         * remove source and sink paths associated to this widget.
         * While removing the path, remove reference to it from both
index 031dad5fc4c701b8107828927e34ae7c2513eb63..3e8b6c035ce3ffba2cbe48c7b69597d3a71ecc0b 100644 (file)
@@ -122,7 +122,7 @@ config SND_SOC_SOF_DEBUG_XRUN_STOP
        bool "SOF stop on XRUN"
        help
          This option forces PCMs to stop on any XRUN event. This is useful to
-         preserve any trace data ond pipeline status prior to the XRUN.
+         preserve any trace data and pipeline status prior to the XRUN.
          Say Y if you are debugging SOF FW pipeline XRUNs.
          If unsure select "N".
 
index d731ca62d59943304fc552f7fa839bd035227cb6..e08fbf8e3ee0f66027d6989e26c846edc4d563fb 100644 (file)
@@ -450,10 +450,8 @@ lookup_device_name(u32 id)
 static void snd_usb_audio_free(struct snd_card *card)
 {
        struct snd_usb_audio *chip = card->private_data;
-       struct snd_usb_endpoint *ep, *n;
 
-       list_for_each_entry_safe(ep, n, &chip->ep_list, list)
-               snd_usb_endpoint_free(ep);
+       snd_usb_endpoint_free_all(chip);
 
        mutex_destroy(&chip->mutex);
        if (!atomic_read(&chip->shutdown))
@@ -611,6 +609,7 @@ static int snd_usb_audio_create(struct usb_interface *intf,
        chip->usb_id = usb_id;
        INIT_LIST_HEAD(&chip->pcm_list);
        INIT_LIST_HEAD(&chip->ep_list);
+       INIT_LIST_HEAD(&chip->iface_ref_list);
        INIT_LIST_HEAD(&chip->midi_list);
        INIT_LIST_HEAD(&chip->mixer_list);
 
index 6a027c349194a4a0c37ebf8ee3dae34c441dab65..37091b11761434a5ccfb6b40e3906ea184d9a52b 100644 (file)
@@ -18,6 +18,7 @@ struct audioformat {
        unsigned int frame_size;        /* samples per frame for non-audio */
        unsigned char iface;            /* interface number */
        unsigned char altsetting;       /* corresponding alternate setting */
+       unsigned char ep_idx;           /* endpoint array index */
        unsigned char altset_idx;       /* array index of alternate setting */
        unsigned char attributes;       /* corresponding attributes of cs endpoint */
        unsigned char endpoint;         /* endpoint */
@@ -42,6 +43,7 @@ struct audioformat {
 };
 
 struct snd_usb_substream;
+struct snd_usb_iface_ref;
 struct snd_usb_endpoint;
 struct snd_usb_power_domain;
 
@@ -58,6 +60,7 @@ struct snd_urb_ctx {
 
 struct snd_usb_endpoint {
        struct snd_usb_audio *chip;
+       struct snd_usb_iface_ref *iface_ref;
 
        int opened;             /* open refcount; protect with chip->mutex */
        atomic_t running;       /* running status */
index 162da7a500463f672992843ae3e952088c8504c7..fe73fe3ff2bcadccafa584bb5b756d5f730cf9a4 100644 (file)
 #define EP_FLAG_RUNNING                1
 #define EP_FLAG_STOPPING       2
 
+/* interface refcounting */
+struct snd_usb_iface_ref {
+       unsigned char iface;
+       bool need_setup;
+       int opened;
+       struct list_head list;
+};
+
 /*
  * snd_usb_endpoint is a model that abstracts everything related to an
  * USB endpoint and its streaming.
@@ -488,6 +496,28 @@ exit_clear:
        clear_bit(ctx->index, &ep->active_mask);
 }
 
+/*
+ * Find or create a refcount object for the given interface
+ *
+ * The objects are released altogether in snd_usb_endpoint_free_all()
+ */
+static struct snd_usb_iface_ref *
+iface_ref_find(struct snd_usb_audio *chip, int iface)
+{
+       struct snd_usb_iface_ref *ip;
+
+       list_for_each_entry(ip, &chip->iface_ref_list, list)
+               if (ip->iface == iface)
+                       return ip;
+
+       ip = kzalloc(sizeof(*ip), GFP_KERNEL);
+       if (!ip)
+               return NULL;
+       ip->iface = iface;
+       list_add_tail(&ip->list, &chip->iface_ref_list);
+       return ip;
+}
+
 /*
  * Get the existing endpoint object corresponding EP
  * Returns NULL if not present.
@@ -520,8 +550,8 @@ snd_usb_get_endpoint(struct snd_usb_audio *chip, int ep_num)
  *
  * Returns zero on success or a negative error code.
  *
- * New endpoints will be added to chip->ep_list and must be freed by
- * calling snd_usb_endpoint_free().
+ * New endpoints will be added to chip->ep_list and freed by
+ * calling snd_usb_endpoint_free_all().
  *
  * For SND_USB_ENDPOINT_TYPE_SYNC, the caller needs to guarantee that
  * bNumEndpoints > 1 beforehand.
@@ -653,11 +683,17 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
                } else {
                        ep->iface = fp->iface;
                        ep->altsetting = fp->altsetting;
-                       ep->ep_idx = 0;
+                       ep->ep_idx = fp->ep_idx;
                }
                usb_audio_dbg(chip, "Open EP 0x%x, iface=%d:%d, idx=%d\n",
                              ep_num, ep->iface, ep->altsetting, ep->ep_idx);
 
+               ep->iface_ref = iface_ref_find(chip, ep->iface);
+               if (!ep->iface_ref) {
+                       ep = NULL;
+                       goto unlock;
+               }
+
                ep->cur_audiofmt = fp;
                ep->cur_channels = fp->channels;
                ep->cur_rate = params_rate(params);
@@ -681,6 +717,11 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
                              ep->implicit_fb_sync);
 
        } else {
+               if (WARN_ON(!ep->iface_ref)) {
+                       ep = NULL;
+                       goto unlock;
+               }
+
                if (!endpoint_compatible(ep, fp, params)) {
                        usb_audio_err(chip, "Incompatible EP setup for 0x%x\n",
                                      ep_num);
@@ -692,6 +733,9 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
                              ep_num, ep->opened);
        }
 
+       if (!ep->iface_ref->opened++)
+               ep->iface_ref->need_setup = true;
+
        ep->opened++;
 
  unlock:
@@ -760,12 +804,16 @@ void snd_usb_endpoint_close(struct snd_usb_audio *chip,
        mutex_lock(&chip->mutex);
        usb_audio_dbg(chip, "Closing EP 0x%x (count %d)\n",
                      ep->ep_num, ep->opened);
-       if (!--ep->opened) {
+
+       if (!--ep->iface_ref->opened)
                endpoint_set_interface(chip, ep, false);
+
+       if (!--ep->opened) {
                ep->iface = 0;
                ep->altsetting = 0;
                ep->cur_audiofmt = NULL;
                ep->cur_rate = 0;
+               ep->iface_ref = NULL;
                usb_audio_dbg(chip, "EP 0x%x closed\n", ep->ep_num);
        }
        mutex_unlock(&chip->mutex);
@@ -775,6 +823,8 @@ void snd_usb_endpoint_close(struct snd_usb_audio *chip,
 void snd_usb_endpoint_suspend(struct snd_usb_endpoint *ep)
 {
        ep->need_setup = true;
+       if (ep->iface_ref)
+               ep->iface_ref->need_setup = true;
 }
 
 /*
@@ -1195,11 +1245,13 @@ int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
        int err = 0;
 
        mutex_lock(&chip->mutex);
+       if (WARN_ON(!ep->iface_ref))
+               goto unlock;
        if (!ep->need_setup)
                goto unlock;
 
-       /* No need to (re-)configure the sync EP belonging to the same altset */
-       if (ep->ep_idx) {
+       /* If the interface has already been set up, just set the EP parameters */
+       if (!ep->iface_ref->need_setup) {
                err = snd_usb_endpoint_set_params(chip, ep);
                if (err < 0)
                        goto unlock;
@@ -1242,6 +1294,8 @@ int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
                        goto unlock;
        }
 
+       ep->iface_ref->need_setup = false;
+
  done:
        ep->need_setup = false;
        err = 1;
@@ -1387,15 +1441,21 @@ void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
 }
 
 /**
- * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint
+ * snd_usb_endpoint_free_all: Free the resources of all snd_usb_endpoints
+ * @chip: the chip instance
  *
- * @ep: the endpoint to free
- *
- * This free all resources of the given ep.
+ * This frees all endpoints and their resources
  */
-void snd_usb_endpoint_free(struct snd_usb_endpoint *ep)
+void snd_usb_endpoint_free_all(struct snd_usb_audio *chip)
 {
-       kfree(ep);
+       struct snd_usb_endpoint *ep, *en;
+       struct snd_usb_iface_ref *ip, *in;
+
+       list_for_each_entry_safe(ep, en, &chip->ep_list, list)
+               kfree(ep);
+
+       list_for_each_entry_safe(ip, in, &chip->iface_ref_list, list)
+               kfree(ip);
 }
 
 /*
index 11e3bb839fd7e2a17c70067e402f90376e056401..eea4ca49876d67bb15bdd9b089a0f52760008224 100644 (file)
@@ -42,7 +42,7 @@ void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_suspend(struct snd_usb_endpoint *ep);
 int  snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_release(struct snd_usb_endpoint *ep);
-void snd_usb_endpoint_free(struct snd_usb_endpoint *ep);
+void snd_usb_endpoint_free_all(struct snd_usb_audio *chip);
 
 int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
 int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep,
index eb3a4c433c3e9dff8a17144113d013a90600bb63..1ac2cc6c33fb1628a15dce6293c3ce4351088a1c 100644 (file)
@@ -58,8 +58,6 @@ static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
        IMPLICIT_FB_FIXED_DEV(0x0499, 0x172f, 0x81, 2), /* Steinberg UR22C */
        IMPLICIT_FB_FIXED_DEV(0x0d9a, 0x00df, 0x81, 2), /* RTX6001 */
        IMPLICIT_FB_FIXED_DEV(0x22f0, 0x0006, 0x81, 3), /* Allen&Heath Qu-16 */
-       IMPLICIT_FB_FIXED_DEV(0x2b73, 0x000a, 0x82, 0), /* Pioneer DJ DJM-900NXS2 */
-       IMPLICIT_FB_FIXED_DEV(0x2b73, 0x0017, 0x82, 0), /* Pioneer DJ DJM-250MK2 */
        IMPLICIT_FB_FIXED_DEV(0x1686, 0xf029, 0x82, 2), /* Zoom UAC-2 */
        IMPLICIT_FB_FIXED_DEV(0x2466, 0x8003, 0x86, 2), /* Fractal Audio Axe-Fx II */
        IMPLICIT_FB_FIXED_DEV(0x0499, 0x172a, 0x86, 2), /* Yamaha MODX */
@@ -74,10 +72,12 @@ static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
 
        /* No quirk for playback but with capture quirk (see below) */
        IMPLICIT_FB_SKIP_DEV(0x0582, 0x0130),   /* BOSS BR-80 */
+       IMPLICIT_FB_SKIP_DEV(0x0582, 0x0171),   /* BOSS RC-505 */
        IMPLICIT_FB_SKIP_DEV(0x0582, 0x0189),   /* BOSS GT-100v2 */
        IMPLICIT_FB_SKIP_DEV(0x0582, 0x01d6),   /* BOSS GT-1 */
        IMPLICIT_FB_SKIP_DEV(0x0582, 0x01d8),   /* BOSS Katana */
        IMPLICIT_FB_SKIP_DEV(0x0582, 0x01e5),   /* BOSS GT-001 */
+       IMPLICIT_FB_SKIP_DEV(0x0582, 0x0203),   /* BOSS AD-10 */
 
        {} /* terminator */
 };
@@ -85,10 +85,12 @@ static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
 /* Implicit feedback quirk table for capture: only FIXED type */
 static const struct snd_usb_implicit_fb_match capture_implicit_fb_quirks[] = {
        IMPLICIT_FB_FIXED_DEV(0x0582, 0x0130, 0x0d, 0x01), /* BOSS BR-80 */
+       IMPLICIT_FB_FIXED_DEV(0x0582, 0x0171, 0x0d, 0x01), /* BOSS RC-505 */
        IMPLICIT_FB_FIXED_DEV(0x0582, 0x0189, 0x0d, 0x01), /* BOSS GT-100v2 */
        IMPLICIT_FB_FIXED_DEV(0x0582, 0x01d6, 0x0d, 0x01), /* BOSS GT-1 */
        IMPLICIT_FB_FIXED_DEV(0x0582, 0x01d8, 0x0d, 0x01), /* BOSS Katana */
        IMPLICIT_FB_FIXED_DEV(0x0582, 0x01e5, 0x0d, 0x01), /* BOSS GT-001 */
+       IMPLICIT_FB_FIXED_DEV(0x0582, 0x0203, 0x0d, 0x01), /* BOSS AD-10 */
 
        {} /* terminator */
 };
@@ -96,7 +98,7 @@ static const struct snd_usb_implicit_fb_match capture_implicit_fb_quirks[] = {
 /* set up sync EP information on the audioformat */
 static int add_implicit_fb_sync_ep(struct snd_usb_audio *chip,
                                   struct audioformat *fmt,
-                                  int ep, int ifnum,
+                                  int ep, int ep_idx, int ifnum,
                                   const struct usb_host_interface *alts)
 {
        struct usb_interface *iface;
@@ -111,7 +113,7 @@ static int add_implicit_fb_sync_ep(struct snd_usb_audio *chip,
        fmt->sync_ep = ep;
        fmt->sync_iface = ifnum;
        fmt->sync_altsetting = alts->desc.bAlternateSetting;
-       fmt->sync_ep_idx = 0;
+       fmt->sync_ep_idx = ep_idx;
        fmt->implicit_fb = 1;
        usb_audio_dbg(chip,
                      "%d:%d: added %s implicit_fb sync_ep %x, iface %d:%d\n",
@@ -143,7 +145,7 @@ static int add_generic_uac2_implicit_fb(struct snd_usb_audio *chip,
            (epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) !=
                                        USB_ENDPOINT_USAGE_IMPLICIT_FB)
                return 0;
-       return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress,
+       return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress, 0,
                                       ifnum, alts);
 }
 
@@ -169,10 +171,32 @@ static int add_roland_implicit_fb(struct snd_usb_audio *chip,
            (epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) !=
                                        USB_ENDPOINT_USAGE_IMPLICIT_FB)
                return 0;
-       return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress,
+       return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress, 0,
                                       ifnum, alts);
 }
 
+/* Pioneer devices: playback and capture streams sharing the same iface/altset
+ */
+static int add_pioneer_implicit_fb(struct snd_usb_audio *chip,
+                                  struct audioformat *fmt,
+                                  struct usb_host_interface *alts)
+{
+       struct usb_endpoint_descriptor *epd;
+
+       if (alts->desc.bNumEndpoints != 2)
+               return 0;
+
+       epd = get_endpoint(alts, 1);
+       if (!usb_endpoint_is_isoc_in(epd) ||
+           (epd->bmAttributes & USB_ENDPOINT_SYNCTYPE) != USB_ENDPOINT_SYNC_ASYNC ||
+           ((epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) !=
+            USB_ENDPOINT_USAGE_DATA &&
+            (epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) !=
+            USB_ENDPOINT_USAGE_IMPLICIT_FB))
+               return 0;
+       return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress, 1,
+                                      alts->desc.bInterfaceNumber, alts);
+}
 
 static int __add_generic_implicit_fb(struct snd_usb_audio *chip,
                                     struct audioformat *fmt,
@@ -193,7 +217,7 @@ static int __add_generic_implicit_fb(struct snd_usb_audio *chip,
        if (!usb_endpoint_is_isoc_in(epd) ||
            (epd->bmAttributes & USB_ENDPOINT_SYNCTYPE) != USB_ENDPOINT_SYNC_ASYNC)
                return 0;
-       return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress,
+       return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress, 0,
                                       iface, alts);
 }
 
@@ -246,7 +270,7 @@ static int audioformat_implicit_fb_quirk(struct snd_usb_audio *chip,
                case IMPLICIT_FB_NONE:
                        return 0; /* No quirk */
                case IMPLICIT_FB_FIXED:
-                       return add_implicit_fb_sync_ep(chip, fmt, p->ep_num,
+                       return add_implicit_fb_sync_ep(chip, fmt, p->ep_num, 0,
                                                       p->iface, NULL);
                }
        }
@@ -274,6 +298,14 @@ static int audioformat_implicit_fb_quirk(struct snd_usb_audio *chip,
                        return 1;
        }
 
+       /* Pioneer devices: implicit feedback with the vendor-spec class */
+       if (attr == USB_ENDPOINT_SYNC_ASYNC &&
+           alts->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
+           USB_ID_VENDOR(chip->usb_id) == 0x2b73 /* Pioneer */) {
+               if (add_pioneer_implicit_fb(chip, fmt, alts))
+                       return 1;
+       }
+
        /* Try the generic implicit fb if available */
        if (chip->generic_implicit_fb)
                return add_generic_implicit_fb(chip, fmt, alts);
@@ -291,8 +323,8 @@ static int audioformat_capture_quirk(struct snd_usb_audio *chip,
 
        p = find_implicit_fb_entry(chip, capture_implicit_fb_quirks, alts);
        if (p && p->type == IMPLICIT_FB_FIXED)
-               return add_implicit_fb_sync_ep(chip, fmt, p->ep_num, p->iface,
-                                              NULL);
+               return add_implicit_fb_sync_ep(chip, fmt, p->ep_num, 0,
+                                              p->iface, NULL);
        return 0;
 }
 
@@ -374,20 +406,19 @@ snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip,
                                     int stream)
 {
        struct snd_usb_substream *subs;
-       const struct audioformat *fp, *sync_fmt;
+       const struct audioformat *fp, *sync_fmt = NULL;
        int score, high_score;
 
-       /* When sharing the same altset, use the original audioformat */
+       /* Use the original audioformat as fallback for the shared altset */
        if (target->iface == target->sync_iface &&
            target->altsetting == target->sync_altsetting)
-               return target;
+               sync_fmt = target;
 
        subs = find_matching_substream(chip, stream, target->sync_ep,
                                       target->fmt_type);
        if (!subs)
-               return NULL;
+               return sync_fmt;
 
-       sync_fmt = NULL;
        high_score = 0;
        list_for_each_entry(fp, &subs->fmt_list, list) {
                score = match_endpoint_audioformats(subs, fp,
index c8213652470c4f42f2ced42ab247cd8d4692d815..0c23fa6d8525dade3e423d6ee5dadb200ff62af3 100644 (file)
@@ -1889,6 +1889,8 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi,
                ms_ep = find_usb_ms_endpoint_descriptor(hostep);
                if (!ms_ep)
                        continue;
+               if (ms_ep->bNumEmbMIDIJack > 0x10)
+                       continue;
                if (usb_endpoint_dir_out(ep)) {
                        if (endpoints[epidx].out_ep) {
                                if (++epidx >= MIDI_MAX_ENDPOINTS) {
@@ -2141,6 +2143,8 @@ static int snd_usbmidi_detect_roland(struct snd_usb_midi *umidi,
                    cs_desc[1] == USB_DT_CS_INTERFACE &&
                    cs_desc[2] == 0xf1 &&
                    cs_desc[3] == 0x02) {
+                       if (cs_desc[4] > 0x10 || cs_desc[5] > 0x10)
+                               continue;
                        endpoint->in_cables  = (1 << cs_desc[4]) - 1;
                        endpoint->out_cables = (1 << cs_desc[5]) - 1;
                        return snd_usbmidi_detect_endpoints(umidi, endpoint, 1);
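
Both checks above cap descriptor-supplied jack and cable counts at 0x10
because the cable bitmaps are built as (1 << n) - 1 into 16-bit masks: a
count above 16 cannot fit, and shift amounts approaching the width of int
head into undefined-behaviour territory, so oversized values can only come
from a malformed descriptor. A hedged sketch of the guard:

    /* in_cables/out_cables are 16-bit bitmaps, so at most 16 cables fit */
    static inline bool jack_count_valid(u8 n)
    {
            return n <= 0x10;
    }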
index 0e11cb96fa8cfe01215782d6e6b3d9fde5e18ccd..c8a4bdf18207c915486b0ebcdaa9f9405f8ebe44 100644 (file)
@@ -3362,6 +3362,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
                                        .altsetting = 1,
                                        .altset_idx = 1,
                                        .endpoint = 0x86,
+                                       .ep_idx = 1,
                                        .ep_attr = USB_ENDPOINT_XFER_ISOC|
                                                 USB_ENDPOINT_SYNC_ASYNC|
                                                 USB_ENDPOINT_USAGE_IMPLICIT_FB,
@@ -3450,6 +3451,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
                                        .altsetting = 1,
                                        .altset_idx = 1,
                                        .endpoint = 0x82,
+                                       .ep_idx = 1,
                                        .ep_attr = USB_ENDPOINT_XFER_ISOC|
                                                USB_ENDPOINT_SYNC_ASYNC|
                                                USB_ENDPOINT_USAGE_IMPLICIT_FB,
@@ -3506,6 +3508,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
                                        .altsetting = 1,
                                        .altset_idx = 1,
                                        .endpoint = 0x82,
+                                       .ep_idx = 1,
                                        .ep_attr = USB_ENDPOINT_XFER_ISOC|
                                                 USB_ENDPOINT_SYNC_ASYNC|
                                                 USB_ENDPOINT_USAGE_IMPLICIT_FB,
@@ -3562,6 +3565,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
                                        .altsetting = 1,
                                        .altset_idx = 1,
                                        .endpoint = 0x82,
+                                       .ep_idx = 1,
                                        .ep_attr = USB_ENDPOINT_XFER_ISOC|
                                                 USB_ENDPOINT_SYNC_ASYNC|
                                                 USB_ENDPOINT_USAGE_IMPLICIT_FB,
@@ -3619,6 +3623,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
                                        .altsetting = 1,
                                        .altset_idx = 1,
                                        .endpoint = 0x82,
+                                       .ep_idx = 1,
                                        .ep_attr = USB_ENDPOINT_XFER_ISOC|
                                                USB_ENDPOINT_SYNC_ASYNC|
                                        USB_ENDPOINT_USAGE_IMPLICIT_FB,
@@ -3679,6 +3684,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
                                        .altsetting = 1,
                                        .altset_idx = 1,
                                        .endpoint = 0x82,
+                                       .ep_idx = 1,
                                        .ep_attr = USB_ENDPOINT_XFER_ISOC|
                                            USB_ENDPOINT_SYNC_ASYNC|
                                            USB_ENDPOINT_USAGE_IMPLICIT_FB,
index e4a690bb4c996b7670186e078ec8063e26af799e..89e172642d98b6ddb4d697a3f324b66d6cc3814f 100644 (file)
@@ -120,6 +120,40 @@ static int create_standard_audio_quirk(struct snd_usb_audio *chip,
        return 0;
 }
 
+/* Create the audio stream and the corresponding endpoints from the fixed
+ * audioformat object; this is used for quirks with fixed EPs
+ */
+static int add_audio_stream_from_fixed_fmt(struct snd_usb_audio *chip,
+                                          struct audioformat *fp)
+{
+       int stream, err;
+
+       stream = (fp->endpoint & USB_DIR_IN) ?
+               SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+
+       snd_usb_audioformat_set_sync_ep(chip, fp);
+
+       err = snd_usb_add_audio_stream(chip, stream, fp);
+       if (err < 0)
+               return err;
+
+       err = snd_usb_add_endpoint(chip, fp->endpoint,
+                                  SND_USB_ENDPOINT_TYPE_DATA);
+       if (err < 0)
+               return err;
+
+       if (fp->sync_ep) {
+               err = snd_usb_add_endpoint(chip, fp->sync_ep,
+                                          fp->implicit_fb ?
+                                          SND_USB_ENDPOINT_TYPE_DATA :
+                                          SND_USB_ENDPOINT_TYPE_SYNC);
+               if (err < 0)
+                       return err;
+       }
+
+       return 0;
+}
+
 /*
  * create a stream for an endpoint/altsetting without proper descriptors
  */
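The new helper centralizes what create_fixed_stream_quirk() and create_uaxx_quirk() previously open-coded: pick the stream direction from the endpoint address, register the stream, register the data endpoint, and, when the format carries a sync endpoint, register that one too. With implicit feedback the sync endpoint is itself a data endpoint, hence the type switch. A hedged caller sketch mirroring the create_uaxx_quirk() call site further down; the function name here is illustrative:

    /* hedged caller sketch; chip/fp setup assumed as in create_uaxx_quirk() */
    static int register_fixed_quirk(struct snd_usb_audio *chip,
                                    struct audioformat *fp)
    {
            int err = add_audio_stream_from_fixed_fmt(chip, fp);

            if (err < 0) {
                    list_del(&fp->list);    /* unlink to avoid a double-free */
                    kfree(fp);
            }
            return err;
    }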
@@ -131,8 +165,8 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
        struct audioformat *fp;
        struct usb_host_interface *alts;
        struct usb_interface_descriptor *altsd;
-       int stream, err;
        unsigned *rate_table = NULL;
+       int err;
 
        fp = kmemdup(quirk->data, sizeof(*fp), GFP_KERNEL);
        if (!fp)
@@ -153,11 +187,6 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
                fp->rate_table = rate_table;
        }
 
-       stream = (fp->endpoint & USB_DIR_IN)
-               ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
-       err = snd_usb_add_audio_stream(chip, stream, fp);
-       if (err < 0)
-               goto error;
        if (fp->iface != get_iface_desc(&iface->altsetting[0])->bInterfaceNumber ||
            fp->altset_idx >= iface->num_altsetting) {
                err = -EINVAL;
@@ -165,7 +194,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
        }
        alts = &iface->altsetting[fp->altset_idx];
        altsd = get_iface_desc(alts);
-       if (altsd->bNumEndpoints < 1) {
+       if (altsd->bNumEndpoints <= fp->ep_idx) {
                err = -EINVAL;
                goto error;
        }
@@ -175,7 +204,14 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
        if (fp->datainterval == 0)
                fp->datainterval = snd_usb_parse_datainterval(chip, alts);
        if (fp->maxpacksize == 0)
-               fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
+               fp->maxpacksize = le16_to_cpu(get_endpoint(alts, fp->ep_idx)->wMaxPacketSize);
+       if (!fp->fmt_type)
+               fp->fmt_type = UAC_FORMAT_TYPE_I;
+
+       err = add_audio_stream_from_fixed_fmt(chip, fp);
+       if (err < 0)
+               goto error;
+
        usb_set_interface(chip->dev, fp->iface, 0);
        snd_usb_init_pitch(chip, fp);
        snd_usb_init_sample_rate(chip, fp, fp->rate_max);
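Two things change in create_fixed_stream_quirk(): the stream is registered only after the interface/altsetting checks pass (via the helper above), and the endpoint-count check is generalized from "at least one endpoint" to "endpoint fp->ep_idx exists". A condensed sketch of the new validation, as a hypothetical standalone function with the file's own helpers assumed:

    /* condensed sketch of the reordered checks; function name is made up */
    static int validate_fixed_ep(struct usb_host_interface *alts,
                                 struct audioformat *fp)
    {
            /* for ep_idx == 1 the altsetting must expose >= 2 endpoints */
            if (get_iface_desc(alts)->bNumEndpoints <= fp->ep_idx)
                    return -EINVAL;
            if (fp->maxpacksize == 0)
                    fp->maxpacksize = le16_to_cpu(
                            get_endpoint(alts, fp->ep_idx)->wMaxPacketSize);
            return 0;
    }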
@@ -417,7 +453,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
        struct usb_host_interface *alts;
        struct usb_interface_descriptor *altsd;
        struct audioformat *fp;
-       int stream, err;
+       int err;
 
        /* both PCM and MIDI interfaces have 2 or more altsettings */
        if (iface->num_altsetting < 2)
@@ -482,9 +518,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
                return -ENXIO;
        }
 
-       stream = (fp->endpoint & USB_DIR_IN)
-               ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
-       err = snd_usb_add_audio_stream(chip, stream, fp);
+       err = add_audio_stream_from_fixed_fmt(chip, fp);
        if (err < 0) {
                list_del(&fp->list); /* unlink for avoiding double-free */
                kfree(fp);
index 980287aadd361b4b48f3aaa63faa6e05495805e0..215c1771dd570725671ceab9913ca2cdadc33126 100644 (file)
@@ -44,6 +44,7 @@ struct snd_usb_audio {
 
        struct list_head pcm_list;      /* list of pcm streams */
        struct list_head ep_list;       /* list of audio-related endpoints */
+       struct list_head iface_ref_list; /* list of interface refcounts */
        int pcm_devs;
 
        struct list_head midi_list;     /* list of midi interfaces */
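The new iface_ref_list head tracks per-interface refcounts alongside the existing per-chip lists. A sketch of the expected setup, assuming the chip constructor initializes it like its siblings; the element layout shown is an assumption, not this header's definition:

    #include <linux/list.h>

    struct iface_ref {                      /* assumed element layout */
            int iface;
            struct list_head list;
    };

    static void chip_init_lists(struct snd_usb_audio *chip)
    {
            INIT_LIST_HEAD(&chip->pcm_list);
            INIT_LIST_HEAD(&chip->ep_list);
            INIT_LIST_HEAD(&chip->iface_ref_list);  /* new list head */
    }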
index 595e164dc352f08d97125f04e49efd89e9281811..feb30c2c788159008c6caa00fe11b48a27b8a9b3 100755 (executable)
@@ -152,6 +152,7 @@ setup_instance() { # [instance]
        set_array_of ${instance}.options ${instancedir}/trace_options
        set_value_of ${instance}.trace_clock ${instancedir}/trace_clock
        set_value_of ${instance}.cpumask ${instancedir}/tracing_cpumask
+       set_value_of ${instance}.tracing_on ${instancedir}/tracing_on
        set_value_of ${instance}.tracer ${instancedir}/current_tracer
        set_array_of ${instance}.ftrace.filters \
                ${instancedir}/set_ftrace_filter
index 6c0d4b61e0c260a9e213453b76e887282a1edde1..a0c3bcc6da4f36c24bc29efa4ec8a718d213e3c1 100755 (executable)
@@ -221,6 +221,10 @@ instance_options() { # [instance-name]
        if [ `echo $val | sed -e s/f//g`x != x ]; then
                emit_kv $PREFIX.cpumask = $val
        fi
+       val=`cat $INSTANCE/tracing_on`
+       if [ `echo $val | sed -e s/f//g`x != x ]; then
+               emit_kv $PREFIX.tracing_on = $val
+       fi
 
        val=
        for i in `cat $INSTANCE/set_event`; do
index 3fae61ef63396dad501e1b65bf326ff7728c5522..ff3aa0cf3997879b898878df7099722c2050aad8 100644 (file)
@@ -11,7 +11,6 @@
 #include <bpf/bpf.h>
 #include <bpf/libbpf.h>
 #include <net/if.h>
-#include <linux/if.h>
 #include <linux/rtnetlink.h>
 #include <linux/socket.h>
 #include <linux/tc_act/tc_bpf.h>
index e3ea569ee1253ec73a82d12189aa6eaa157df5fa..7409d7860aa6c3167b6a9e282f4e046b06e6b91f 100644 (file)
@@ -139,6 +139,8 @@ int eprintf(int level, int var, const char *fmt, ...)
 #define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_err(fmt, ...) \
        eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+       eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
 
 static bool is_btf_id(const char *name)
 {
@@ -472,7 +474,7 @@ static int symbols_resolve(struct object *obj)
        int nr_funcs    = obj->nr_funcs;
        int err, type_id;
        struct btf *btf;
-       __u32 nr;
+       __u32 nr_types;
 
        btf = btf__parse(obj->btf ?: obj->path, NULL);
        err = libbpf_get_error(btf);
@@ -483,12 +485,12 @@ static int symbols_resolve(struct object *obj)
        }
 
        err = -1;
-       nr  = btf__get_nr_types(btf);
+       nr_types = btf__get_nr_types(btf);
 
        /*
         * Iterate all the BTF types and search for collected symbol IDs.
         */
-       for (type_id = 1; type_id <= nr; type_id++) {
+       for (type_id = 1; type_id <= nr_types; type_id++) {
                const struct btf_type *type;
                struct rb_root *root;
                struct btf_id *id;
@@ -526,8 +528,13 @@ static int symbols_resolve(struct object *obj)
 
                id = btf_id__find(root, str);
                if (id) {
-                       id->id = type_id;
-                       (*nr)--;
+                       if (id->id) {
+                               pr_info("WARN: multiple IDs found for '%s': %d, %d - using %d\n",
+                                       str, id->id, type_id, id->id);
+                       } else {
+                               id->id = type_id;
+                               (*nr)--;
+                       }
                }
        }
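The resolve_btfids change makes ID assignment first-wins: a symbol matching more than one BTF type keeps the first type_id and only logs the collision, instead of silently overwriting it and decrementing the pending counter a second time. A generic sketch of that policy with simplified, made-up types:

    #include <stdio.h>

    struct sym { const char *name; int id; };

    static void assign_id(struct sym *s, int type_id, int *pending)
    {
            if (s->id) {    /* already resolved: keep the first ID */
                    fprintf(stderr,
                            "WARN: multiple IDs for '%s': %d, %d - using %d\n",
                            s->name, s->id, type_id, s->id);
                    return;
            }
            s->id = type_id;
            (*pending)--;   /* one fewer unresolved symbol */
    }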
 
index ce365d212768211b8812054bae5399f464d67acb..cc7070c7439ba6a5327d992a6cebf4fdc591f7ab 100644 (file)
@@ -79,9 +79,4 @@
 #define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
 #endif // static_assert
 
-#ifdef __GENKSYMS__
-/* genksyms gets confused by _Static_assert */
-#define _Static_assert(expr, ...)
-#endif
-
 #endif /* _LINUX_BUILD_BUG_H */
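The removed block stubbed out _Static_assert for genksyms, which previously could not parse the keyword; dropping the stub implies, as far as this hunk shows, that genksyms now copes and the assertions stay visible in every pass. For reference, the kind of use the header supports:

    #include <linux/build_bug.h>
    #include <linux/types.h>

    struct pkt {
            __u8   flags;
            __u8   len;
            __le16 csum;
    };
    /* checked at compile time; costs nothing at run time */
    static_assert(sizeof(struct pkt) == 4, "struct pkt must stay 4 bytes");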
index 886802b8ffba3617f64b47f7f6f452335fdfa23e..374c67875cdbd5c60eb15c1b932e7fcdc7f9fa7d 100644 (file)
@@ -251,6 +251,7 @@ struct kvm_hyperv_exit {
 #define KVM_EXIT_X86_RDMSR        29
 #define KVM_EXIT_X86_WRMSR        30
 #define KVM_EXIT_DIRTY_RING_FULL  31
+#define KVM_EXIT_AP_RESET_HOLD    32
 
 /* For KVM_EXIT_INTERNAL_ERROR */
 /* Emulate instruction failed. */
@@ -573,6 +574,7 @@ struct kvm_vapic_addr {
 #define KVM_MP_STATE_CHECK_STOP        6
 #define KVM_MP_STATE_OPERATING         7
 #define KVM_MP_STATE_LOAD              8
+#define KVM_MP_STATE_AP_RESET_HOLD     9
 
 struct kvm_mp_state {
        __u32 mp_state;
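The two new constants pair up: a vCPU that executes an AP reset hold (judging by the naming, the SEV-ES secondary-CPU parking case; the hunk itself does not say) exits to userspace with KVM_EXIT_AP_RESET_HOLD and then reports KVM_MP_STATE_AP_RESET_HOLD until restarted. A hedged userspace sketch using the long-standing KVM_GET_MP_STATE ioctl; vcpu_fd and run are assumed to come from the usual KVM setup:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int vcpu_is_parked(int vcpu_fd, struct kvm_run *run)
    {
            struct kvm_mp_state st;

            if (run->exit_reason != KVM_EXIT_AP_RESET_HOLD)
                    return 0;
            if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &st) < 0)
                    return -1;
            return st.mp_state == KVM_MP_STATE_AP_RESET_HOLD;
    }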
index 3c3f2bc6c6528e08fdaa913f1da43e2c02799b48..9970a288dda536768b5878f5f0a2c71dc0af8a97 100644 (file)
@@ -240,11 +240,6 @@ static int btf_parse_hdr(struct btf *btf)
        }
 
        meta_left = btf->raw_size - sizeof(*hdr);
-       if (!meta_left) {
-               pr_debug("BTF has no data\n");
-               return -EINVAL;
-       }
-
        if (meta_left < hdr->str_off + hdr->str_len) {
                pr_debug("Invalid BTF total size:%u\n", btf->raw_size);
                return -EINVAL;
index c8d45091e7c26a3bede00f38dc72b03f83d435b4..c70e9e03af3e9929813017c48df7f063f9f701e4 100644 (file)
@@ -27,5 +27,5 @@ int main(int argc, char **argv)
        perf_cpu_map__put(cpus);
 
        __T_END;
-       return 0;
+       return tests_failed == 0 ? 0 : -1;
 }
index 6d8ebe0c25042f7c6902744967c0cb7074db62c8..e2ac0b7f432eaa87aeab1b8952ca27d675ae962a 100644 (file)
@@ -208,13 +208,13 @@ static int test_mmap_thread(void)
        char path[PATH_MAX];
        int id, err, pid, go_pipe[2];
        union perf_event *event;
-       char bf;
        int count = 0;
 
        snprintf(path, PATH_MAX, "%s/kernel/debug/tracing/events/syscalls/sys_enter_prctl/id",
                 sysfs__mountpoint());
 
        if (filename__read_int(path, &id)) {
+               tests_failed++;
                fprintf(stderr, "error: failed to get tracepoint id: %s\n", path);
                return -1;
        }
@@ -229,6 +229,7 @@ static int test_mmap_thread(void)
        pid = fork();
        if (!pid) {
                int i;
+               char bf;
 
                read(go_pipe[0], &bf, 1);
 
@@ -266,7 +267,7 @@ static int test_mmap_thread(void)
        perf_evlist__enable(evlist);
 
        /* kick the child and wait for it to finish */
-       write(go_pipe[1], &bf, 1);
+       write(go_pipe[1], "A", 1);
        waitpid(pid, NULL, 0);
 
        /*
@@ -409,5 +410,5 @@ int main(int argc, char **argv)
        test_mmap_cpus();
 
        __T_END;
-       return 0;
+       return tests_failed == 0 ? 0 : -1;
 }
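The evlist test fix is about scope: bf was read into by the child, but the parent passed the same byte, never written in its copy of the address space, to write(). Declaring the buffer in the child and kicking with a string literal removes the uninitialized read without changing the handshake. The pattern in isolation:

    #include <unistd.h>

    static void kick_child_when_ready(void)
    {
            int go_pipe[2];

            pipe(go_pipe);
            if (fork() == 0) {
                    char bf;                  /* only the child needs it */

                    read(go_pipe[0], &bf, 1); /* block until "go" */
                    _exit(0);                 /* ... child work ... */
            }
            write(go_pipe[1], "A", 1);        /* parent: any byte works */
    }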
index 135722ac965bf7dcd75a3d50dc2bc9250cfd89a2..0ad82d7a2a51b690e57d2d28f16531fe7df40609 100644 (file)
@@ -131,5 +131,5 @@ int main(int argc, char **argv)
        test_stat_thread_enable();
 
        __T_END;
-       return 0;
+       return tests_failed == 0 ? 0 : -1;
 }
index 7dc4d6fbeddee5bdc4b0f0316f8b6a1814fa2c6b..384471441b4842e8cd472d2750e267dcbb05ec3f 100644 (file)
@@ -27,5 +27,5 @@ int main(int argc, char **argv)
        perf_thread_map__put(threads);
 
        __T_END;
-       return 0;
+       return tests_failed == 0 ? 0 : -1;
 }
index 65c4ff6892d967ab02459ba14f258d7080d6e078..e6b6181c6dc63b29abf7463c7bee52c048e5e112 100644 (file)
@@ -39,7 +39,7 @@
    Copyright (C) 2018 Red Hat, Inc., Arnaldo Carvalho de Melo <acme@redhat.com>
 */
 
-#include <bpf/bpf.h>
+#include <bpf.h>
 
 #define NSEC_PER_SEC   1000000000L
 
index 249dfe48cf6ad490d1f3066e137d7f843cce00ab..ebebd3596cf99379d92600bcdae786e9c45e3694 100755 (executable)
@@ -9,31 +9,29 @@ perf stat -a true > /dev/null 2>&1 || exit 2
 
 test_global_aggr()
 {
-       local cyc
-
        perf stat -a --no-big-num -e cycles,instructions sleep 1  2>&1 | \
        grep -e cycles -e instructions | \
        while read num evt hash ipc rest
        do
                # skip not counted events
-               if [[ $num == "<not" ]]; then
+               if [ "$num" = "<not" ]; then
                        continue
                fi
 
                # save cycles count
-               if [[ $evt == "cycles" ]]; then
+               if [ "$evt" = "cycles" ]; then
                        cyc=$num
                        continue
                fi
 
                # skip if no cycles
-               if [[ -z $cyc ]]; then
+               if [ -z "$cyc" ]; then
                        continue
                fi
 
                # use printf for rounding and a leading zero
-               local res=`printf "%.2f" $(echo "scale=6; $num / $cyc" | bc -q)`
-               if [[ $ipc != $res ]]; then
+               res=`printf "%.2f" $(echo "scale=6; $num / $cyc" | bc -q)`
+               if [ "$ipc" != "$res" ]; then
                        echo "IPC is different: $res != $ipc  ($num / $cyc)"
                        exit 1
                fi
@@ -42,32 +40,32 @@ test_global_aggr()
 
 test_no_aggr()
 {
-       declare -A results
-
        perf stat -a -A --no-big-num -e cycles,instructions sleep 1  2>&1 | \
        grep ^CPU | \
        while read cpu num evt hash ipc rest
        do
                # skip not counted events
-               if [[ $num == "<not" ]]; then
+               if [ "$num" = "<not" ]; then
                        continue
                fi
 
                # save cycles count
-               if [[ $evt == "cycles" ]]; then
-                       results[$cpu]=$num
+               if [ "$evt" = "cycles" ]; then
+                       results="$results $cpu:$num"
                        continue
                fi
 
+               cyc=${results##* $cpu:}
+               cyc=${cyc%% *}
+
                # skip if no cycles
-               local cyc=${results[$cpu]}
-               if [[ -z $cyc ]]; then
+               if [ -z "$cyc" ]; then
                        continue
                fi
 
                # use printf for rounding and a leading zero
-               local res=`printf "%.2f" $(echo "scale=6; $num / $cyc" | bc -q)`
-               if [[ $ipc != $res ]]; then
+               res=`printf "%.2f" $(echo "scale=6; $num / $cyc" | bc -q)`
+               if [ "$ipc" != "$res" ]; then
                        echo "IPC is different for $cpu: $res != $ipc  ($num / $cyc)"
                        exit 1
                fi
index 062383e225a3e9e64f229b739d2c155b5430e28d..c4ed3dc2c8f405323854e81225b2e1b32bbc7f7d 100644 (file)
@@ -3323,6 +3323,14 @@ int perf_session__write_header(struct perf_session *session,
        attr_offset = lseek(ff.fd, 0, SEEK_CUR);
 
        evlist__for_each_entry(evlist, evsel) {
+               if (evsel->core.attr.size < sizeof(evsel->core.attr)) {
+                       /*
+                        * We are likely in "perf inject" and have read
+                        * from an older file. Update attr size so that the
+                        * reader gets the right offset to the ids.
+                        */
+                       evsel->core.attr.size = sizeof(evsel->core.attr);
+               }
                f_attr = (struct perf_file_attr){
                        .attr = evsel->core.attr,
                        .ids  = {
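This matters when perf inject re-emits data read from an older file: each on-disk attr record is followed by its ids section, and readers step over the record using attr.size, so writing out a stale, smaller size would misplace the ids. A simplified sketch of the layout assumption, not perf's exact on-disk structs:

    #include <linux/perf_event.h>
    #include <linux/types.h>

    struct file_attr_sketch {
            struct perf_event_attr attr;    /* reader advances by attr.size... */
            struct {
                    __u64 offset;
                    __u64 size;
            } ids;                          /* ...to land on this section */
    };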
index f841f3503cae61de55ce1b8dbe912b7b0f8e2f53..1e9d3f982b47742f8c5c043c131945c2b18151cd 100644 (file)
@@ -2980,7 +2980,7 @@ int machines__for_each_thread(struct machines *machines,
 
 pid_t machine__get_current_tid(struct machine *machine, int cpu)
 {
-       int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
+       int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS);
 
        if (cpu < 0 || cpu >= nr_cpus || !machine->current_tid)
                return -1;
@@ -2992,7 +2992,7 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
                             pid_t tid)
 {
        struct thread *thread;
-       int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
+       int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS);
 
        if (cpu < 0)
                return -EINVAL;
index 50ff9795a4f112acb8f73e6750fef60f26d6f48a..25adbcce02814145650819cf9cbf76284c3e434d 100644 (file)
@@ -2404,7 +2404,7 @@ int perf_session__cpu_bitmap(struct perf_session *session,
 {
        int i, err = -1;
        struct perf_cpu_map *map;
-       int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS);
+       int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);
 
        for (i = 0; i < PERF_TYPE_MAX; ++i) {
                struct evsel *evsel;
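Both this hunk and the machine.c hunks above swap nr_cpus_online for nr_cpus_avail when clamping CPU indices: the per-CPU arrays on these paths are sized for all available CPUs, so clamping at the possibly smaller online count could wrongly reject data from a CPU that was offline at record time. The guard in isolation, with MAX_NR_CPUS assumed from the context above:

    /* cpu indexes arrays sized for every available CPU, offline included */
    static int cpu_index_ok(int cpu, int nr_cpus_avail)
    {
            int limit = nr_cpus_avail < MAX_NR_CPUS ?
                        nr_cpus_avail : MAX_NR_CPUS;

            return cpu >= 0 && cpu < limit;
    }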
index 901265127e3698126bc9f395058ee4f12fd05419..12eafd12a693c077178be44b5b60620dee0d03cc 100644 (file)
@@ -8,6 +8,7 @@
 #include "evlist.h"
 #include "expr.h"
 #include "metricgroup.h"
+#include "cgroup.h"
 #include <linux/zalloc.h>
 
 /*
@@ -28,6 +29,7 @@ struct saved_value {
        enum stat_type type;
        int ctx;
        int cpu;
+       struct cgroup *cgrp;
        struct runtime_stat *stat;
        struct stats stats;
        u64 metric_total;
@@ -57,6 +59,9 @@ static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
        if (a->ctx != b->ctx)
                return a->ctx - b->ctx;
 
+       if (a->cgrp != b->cgrp)
+               return (char *)a->cgrp < (char *)b->cgrp ? -1 : +1;
+
        if (a->evsel == NULL && b->evsel == NULL) {
                if (a->stat == b->stat)
                        return 0;
@@ -100,7 +105,8 @@ static struct saved_value *saved_value_lookup(struct evsel *evsel,
                                              bool create,
                                              enum stat_type type,
                                              int ctx,
-                                             struct runtime_stat *st)
+                                             struct runtime_stat *st,
+                                             struct cgroup *cgrp)
 {
        struct rblist *rblist;
        struct rb_node *nd;
@@ -110,10 +116,15 @@ static struct saved_value *saved_value_lookup(struct evsel *evsel,
                .type = type,
                .ctx = ctx,
                .stat = st,
+               .cgrp = cgrp,
        };
 
        rblist = &st->value_list;
 
+       /* don't use context info for clock events */
+       if (type == STAT_NSECS)
+               dm.ctx = 0;
+
        nd = rblist__find(rblist, &dm);
        if (nd)
                return container_of(nd, struct saved_value, rb_node);
@@ -191,12 +202,18 @@ void perf_stat__reset_shadow_per_stat(struct runtime_stat *st)
        reset_stat(st);
 }
 
+struct runtime_stat_data {
+       int ctx;
+       struct cgroup *cgrp;
+};
+
 static void update_runtime_stat(struct runtime_stat *st,
                                enum stat_type type,
-                               int ctx, int cpu, u64 count)
+                               int cpu, u64 count,
+                               struct runtime_stat_data *rsd)
 {
-       struct saved_value *v = saved_value_lookup(NULL, cpu, true,
-                                                  type, ctx, st);
+       struct saved_value *v = saved_value_lookup(NULL, cpu, true, type,
+                                                  rsd->ctx, st, rsd->cgrp);
 
        if (v)
                update_stats(&v->stats, count);
@@ -210,82 +227,86 @@ static void update_runtime_stat(struct runtime_stat *st,
 void perf_stat__update_shadow_stats(struct evsel *counter, u64 count,
                                    int cpu, struct runtime_stat *st)
 {
-       int ctx = evsel_context(counter);
        u64 count_ns = count;
        struct saved_value *v;
+       struct runtime_stat_data rsd = {
+               .ctx = evsel_context(counter),
+               .cgrp = counter->cgrp,
+       };
 
        count *= counter->scale;
 
        if (evsel__is_clock(counter))
-               update_runtime_stat(st, STAT_NSECS, 0, cpu, count_ns);
+               update_runtime_stat(st, STAT_NSECS, cpu, count_ns, &rsd);
        else if (evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
-               update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count);
+               update_runtime_stat(st, STAT_CYCLES, cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
-               update_runtime_stat(st, STAT_CYCLES_IN_TX, ctx, cpu, count);
+               update_runtime_stat(st, STAT_CYCLES_IN_TX, cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TRANSACTION_START))
-               update_runtime_stat(st, STAT_TRANSACTION, ctx, cpu, count);
+               update_runtime_stat(st, STAT_TRANSACTION, cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, ELISION_START))
-               update_runtime_stat(st, STAT_ELISION, ctx, cpu, count);
+               update_runtime_stat(st, STAT_ELISION, cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
                update_runtime_stat(st, STAT_TOPDOWN_TOTAL_SLOTS,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
                update_runtime_stat(st, STAT_TOPDOWN_SLOTS_ISSUED,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
                update_runtime_stat(st, STAT_TOPDOWN_SLOTS_RETIRED,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
                update_runtime_stat(st, STAT_TOPDOWN_FETCH_BUBBLES,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
                update_runtime_stat(st, STAT_TOPDOWN_RECOVERY_BUBBLES,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_RETIRING))
                update_runtime_stat(st, STAT_TOPDOWN_RETIRING,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_BAD_SPEC))
                update_runtime_stat(st, STAT_TOPDOWN_BAD_SPEC,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_FE_BOUND))
                update_runtime_stat(st, STAT_TOPDOWN_FE_BOUND,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_BE_BOUND))
                update_runtime_stat(st, STAT_TOPDOWN_BE_BOUND,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
                update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
                update_runtime_stat(st, STAT_STALLED_CYCLES_BACK,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
-               update_runtime_stat(st, STAT_BRANCHES, ctx, cpu, count);
+               update_runtime_stat(st, STAT_BRANCHES, cpu, count, &rsd);
        else if (evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
-               update_runtime_stat(st, STAT_CACHEREFS, ctx, cpu, count);
+               update_runtime_stat(st, STAT_CACHEREFS, cpu, count, &rsd);
        else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
-               update_runtime_stat(st, STAT_L1_DCACHE, ctx, cpu, count);
+               update_runtime_stat(st, STAT_L1_DCACHE, cpu, count, &rsd);
        else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
-               update_runtime_stat(st, STAT_L1_ICACHE, ctx, cpu, count);
+               update_runtime_stat(st, STAT_L1_ICACHE, cpu, count, &rsd);
        else if (evsel__match(counter, HW_CACHE, HW_CACHE_LL))
-               update_runtime_stat(st, STAT_LL_CACHE, ctx, cpu, count);
+               update_runtime_stat(st, STAT_LL_CACHE, cpu, count, &rsd);
        else if (evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
-               update_runtime_stat(st, STAT_DTLB_CACHE, ctx, cpu, count);
+               update_runtime_stat(st, STAT_DTLB_CACHE, cpu, count, &rsd);
        else if (evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
-               update_runtime_stat(st, STAT_ITLB_CACHE, ctx, cpu, count);
+               update_runtime_stat(st, STAT_ITLB_CACHE, cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, SMI_NUM))
-               update_runtime_stat(st, STAT_SMI_NUM, ctx, cpu, count);
+               update_runtime_stat(st, STAT_SMI_NUM, cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, APERF))
-               update_runtime_stat(st, STAT_APERF, ctx, cpu, count);
+               update_runtime_stat(st, STAT_APERF, cpu, count, &rsd);
 
        if (counter->collect_stat) {
-               v = saved_value_lookup(counter, cpu, true, STAT_NONE, 0, st);
+               v = saved_value_lookup(counter, cpu, true, STAT_NONE, 0, st,
+                                      rsd.cgrp);
                update_stats(&v->stats, count);
                if (counter->metric_leader)
                        v->metric_total += count;
        } else if (counter->metric_leader) {
                v = saved_value_lookup(counter->metric_leader,
-                                      cpu, true, STAT_NONE, 0, st);
+                                      cpu, true, STAT_NONE, 0, st, rsd.cgrp);
                v->metric_total += count;
                v->metric_other++;
        }
@@ -422,11 +443,12 @@ void perf_stat__collect_metric_expr(struct evlist *evsel_list)
 }
 
 static double runtime_stat_avg(struct runtime_stat *st,
-                              enum stat_type type, int ctx, int cpu)
+                              enum stat_type type, int cpu,
+                              struct runtime_stat_data *rsd)
 {
        struct saved_value *v;
 
-       v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
+       v = saved_value_lookup(NULL, cpu, false, type, rsd->ctx, st, rsd->cgrp);
        if (!v)
                return 0.0;
 
@@ -434,11 +456,12 @@ static double runtime_stat_avg(struct runtime_stat *st,
 }
 
 static double runtime_stat_n(struct runtime_stat *st,
-                            enum stat_type type, int ctx, int cpu)
+                            enum stat_type type, int cpu,
+                            struct runtime_stat_data *rsd)
 {
        struct saved_value *v;
 
-       v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
+       v = saved_value_lookup(NULL, cpu, false, type, rsd->ctx, st, rsd->cgrp);
        if (!v)
                return 0.0;
 
@@ -446,16 +469,15 @@ static double runtime_stat_n(struct runtime_stat *st,
 }
 
 static void print_stalled_cycles_frontend(struct perf_stat_config *config,
-                                         int cpu,
-                                         struct evsel *evsel, double avg,
+                                         int cpu, double avg,
                                          struct perf_stat_output_ctx *out,
-                                         struct runtime_stat *st)
+                                         struct runtime_stat *st,
+                                         struct runtime_stat_data *rsd)
 {
        double total, ratio = 0.0;
        const char *color;
-       int ctx = evsel_context(evsel);
 
-       total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+       total = runtime_stat_avg(st, STAT_CYCLES, cpu, rsd);
 
        if (total)
                ratio = avg / total * 100.0;
@@ -470,16 +492,15 @@ static void print_stalled_cycles_frontend(struct perf_stat_config *config,
 }
 
 static void print_stalled_cycles_backend(struct perf_stat_config *config,
-                                        int cpu,
-                                        struct evsel *evsel, double avg,
+                                        int cpu, double avg,
                                         struct perf_stat_output_ctx *out,
-                                        struct runtime_stat *st)
+                                        struct runtime_stat *st,
+                                        struct runtime_stat_data *rsd)
 {
        double total, ratio = 0.0;
        const char *color;
-       int ctx = evsel_context(evsel);
 
-       total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+       total = runtime_stat_avg(st, STAT_CYCLES, cpu, rsd);
 
        if (total)
                ratio = avg / total * 100.0;
@@ -490,17 +511,15 @@ static void print_stalled_cycles_backend(struct perf_stat_config *config,
 }
 
 static void print_branch_misses(struct perf_stat_config *config,
-                               int cpu,
-                               struct evsel *evsel,
-                               double avg,
+                               int cpu, double avg,
                                struct perf_stat_output_ctx *out,
-                               struct runtime_stat *st)
+                               struct runtime_stat *st,
+                               struct runtime_stat_data *rsd)
 {
        double total, ratio = 0.0;
        const char *color;
-       int ctx = evsel_context(evsel);
 
-       total = runtime_stat_avg(st, STAT_BRANCHES, ctx, cpu);
+       total = runtime_stat_avg(st, STAT_BRANCHES, cpu, rsd);
 
        if (total)
                ratio = avg / total * 100.0;
@@ -511,18 +530,15 @@ static void print_branch_misses(struct perf_stat_config *config,
 }
 
 static void print_l1_dcache_misses(struct perf_stat_config *config,
-                                  int cpu,
-                                  struct evsel *evsel,
-                                  double avg,
+                                  int cpu, double avg,
                                   struct perf_stat_output_ctx *out,
-                                  struct runtime_stat *st)
-
+                                  struct runtime_stat *st,
+                                  struct runtime_stat_data *rsd)
 {
        double total, ratio = 0.0;
        const char *color;
-       int ctx = evsel_context(evsel);
 
-       total = runtime_stat_avg(st, STAT_L1_DCACHE, ctx, cpu);
+       total = runtime_stat_avg(st, STAT_L1_DCACHE, cpu, rsd);
 
        if (total)
                ratio = avg / total * 100.0;
@@ -533,18 +549,15 @@ static void print_l1_dcache_misses(struct perf_stat_config *config,
 }
 
 static void print_l1_icache_misses(struct perf_stat_config *config,
-                                  int cpu,
-                                  struct evsel *evsel,
-                                  double avg,
+                                  int cpu, double avg,
                                   struct perf_stat_output_ctx *out,
-                                  struct runtime_stat *st)
-
+                                  struct runtime_stat *st,
+                                  struct runtime_stat_data *rsd)
 {
        double total, ratio = 0.0;
        const char *color;
-       int ctx = evsel_context(evsel);
 
-       total = runtime_stat_avg(st, STAT_L1_ICACHE, ctx, cpu);
+       total = runtime_stat_avg(st, STAT_L1_ICACHE, cpu, rsd);
 
        if (total)
                ratio = avg / total * 100.0;
@@ -554,17 +567,15 @@ static void print_l1_icache_misses(struct perf_stat_config *config,
 }
 
 static void print_dtlb_cache_misses(struct perf_stat_config *config,
-                                   int cpu,
-                                   struct evsel *evsel,
-                                   double avg,
+                                   int cpu, double avg,
                                    struct perf_stat_output_ctx *out,
-                                   struct runtime_stat *st)
+                                   struct runtime_stat *st,
+                                   struct runtime_stat_data *rsd)
 {
        double total, ratio = 0.0;
        const char *color;
-       int ctx = evsel_context(evsel);
 
-       total = runtime_stat_avg(st, STAT_DTLB_CACHE, ctx, cpu);
+       total = runtime_stat_avg(st, STAT_DTLB_CACHE, cpu, rsd);
 
        if (total)
                ratio = avg / total * 100.0;
@@ -574,17 +585,15 @@ static void print_dtlb_cache_misses(struct perf_stat_config *config,
 }
 
 static void print_itlb_cache_misses(struct perf_stat_config *config,
-                                   int cpu,
-                                   struct evsel *evsel,
-                                   double avg,
+                                   int cpu, double avg,
                                    struct perf_stat_output_ctx *out,
-                                   struct runtime_stat *st)
+                                   struct runtime_stat *st,
+                                   struct runtime_stat_data *rsd)
 {
        double total, ratio = 0.0;
        const char *color;
-       int ctx = evsel_context(evsel);
 
-       total = runtime_stat_avg(st, STAT_ITLB_CACHE, ctx, cpu);
+       total = runtime_stat_avg(st, STAT_ITLB_CACHE, cpu, rsd);
 
        if (total)
                ratio = avg / total * 100.0;
@@ -594,17 +603,15 @@ static void print_itlb_cache_misses(struct perf_stat_config *config,
 }
 
 static void print_ll_cache_misses(struct perf_stat_config *config,
-                                 int cpu,
-                                 struct evsel *evsel,
-                                 double avg,
+                                 int cpu, double avg,
                                  struct perf_stat_output_ctx *out,
-                                 struct runtime_stat *st)
+                                 struct runtime_stat *st,
+                                 struct runtime_stat_data *rsd)
 {
        double total, ratio = 0.0;
        const char *color;
-       int ctx = evsel_context(evsel);
 
-       total = runtime_stat_avg(st, STAT_LL_CACHE, ctx, cpu);
+       total = runtime_stat_avg(st, STAT_LL_CACHE, cpu, rsd);
 
        if (total)
                ratio = avg / total * 100.0;
@@ -662,56 +669,61 @@ static double sanitize_val(double x)
        return x;
 }
 
-static double td_total_slots(int ctx, int cpu, struct runtime_stat *st)
+static double td_total_slots(int cpu, struct runtime_stat *st,
+                            struct runtime_stat_data *rsd)
 {
-       return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, ctx, cpu);
+       return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, cpu, rsd);
 }
 
-static double td_bad_spec(int ctx, int cpu, struct runtime_stat *st)
+static double td_bad_spec(int cpu, struct runtime_stat *st,
+                         struct runtime_stat_data *rsd)
 {
        double bad_spec = 0;
        double total_slots;
        double total;
 
-       total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, ctx, cpu) -
-               runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, ctx, cpu) +
-               runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, ctx, cpu);
+       total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, cpu, rsd) -
+               runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, cpu, rsd) +
+               runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, cpu, rsd);
 
-       total_slots = td_total_slots(ctx, cpu, st);
+       total_slots = td_total_slots(cpu, st, rsd);
        if (total_slots)
                bad_spec = total / total_slots;
        return sanitize_val(bad_spec);
 }
 
-static double td_retiring(int ctx, int cpu, struct runtime_stat *st)
+static double td_retiring(int cpu, struct runtime_stat *st,
+                         struct runtime_stat_data *rsd)
 {
        double retiring = 0;
-       double total_slots = td_total_slots(ctx, cpu, st);
+       double total_slots = td_total_slots(cpu, st, rsd);
        double ret_slots = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED,
-                                           ctx, cpu);
+                                           cpu, rsd);
 
        if (total_slots)
                retiring = ret_slots / total_slots;
        return retiring;
 }
 
-static double td_fe_bound(int ctx, int cpu, struct runtime_stat *st)
+static double td_fe_bound(int cpu, struct runtime_stat *st,
+                         struct runtime_stat_data *rsd)
 {
        double fe_bound = 0;
-       double total_slots = td_total_slots(ctx, cpu, st);
+       double total_slots = td_total_slots(cpu, st, rsd);
        double fetch_bub = runtime_stat_avg(st, STAT_TOPDOWN_FETCH_BUBBLES,
-                                           ctx, cpu);
+                                           cpu, rsd);
 
        if (total_slots)
                fe_bound = fetch_bub / total_slots;
        return fe_bound;
 }
 
-static double td_be_bound(int ctx, int cpu, struct runtime_stat *st)
+static double td_be_bound(int cpu, struct runtime_stat *st,
+                         struct runtime_stat_data *rsd)
 {
-       double sum = (td_fe_bound(ctx, cpu, st) +
-                     td_bad_spec(ctx, cpu, st) +
-                     td_retiring(ctx, cpu, st));
+       double sum = (td_fe_bound(cpu, st, rsd) +
+                     td_bad_spec(cpu, st, rsd) +
+                     td_retiring(cpu, st, rsd));
        if (sum == 0)
                return 0;
        return sanitize_val(1.0 - sum);
@@ -722,15 +734,15 @@ static double td_be_bound(int ctx, int cpu, struct runtime_stat *st)
  * the ratios we need to recreate the sum.
  */
 
-static double td_metric_ratio(int ctx, int cpu,
-                             enum stat_type type,
-                             struct runtime_stat *stat)
+static double td_metric_ratio(int cpu, enum stat_type type,
+                             struct runtime_stat *stat,
+                             struct runtime_stat_data *rsd)
 {
-       double sum = runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, ctx, cpu) +
-               runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, ctx, cpu) +
-               runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, ctx, cpu) +
-               runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, ctx, cpu);
-       double d = runtime_stat_avg(stat, type, ctx, cpu);
+       double sum = runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, cpu, rsd) +
+               runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, cpu, rsd) +
+               runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, cpu, rsd) +
+               runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, cpu, rsd);
+       double d = runtime_stat_avg(stat, type, cpu, rsd);
 
        if (sum)
                return d / sum;
@@ -742,34 +754,33 @@ static double td_metric_ratio(int ctx, int cpu,
  * We allow two missing.
  */
 
-static bool full_td(int ctx, int cpu,
-                   struct runtime_stat *stat)
+static bool full_td(int cpu, struct runtime_stat *stat,
+                   struct runtime_stat_data *rsd)
 {
        int c = 0;
 
-       if (runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, ctx, cpu) > 0)
+       if (runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, cpu, rsd) > 0)
                c++;
-       if (runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, ctx, cpu) > 0)
+       if (runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, cpu, rsd) > 0)
                c++;
-       if (runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, ctx, cpu) > 0)
+       if (runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, cpu, rsd) > 0)
                c++;
-       if (runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, ctx, cpu) > 0)
+       if (runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, cpu, rsd) > 0)
                c++;
        return c >= 2;
 }
 
-static void print_smi_cost(struct perf_stat_config *config,
-                          int cpu, struct evsel *evsel,
+static void print_smi_cost(struct perf_stat_config *config, int cpu,
                           struct perf_stat_output_ctx *out,
-                          struct runtime_stat *st)
+                          struct runtime_stat *st,
+                          struct runtime_stat_data *rsd)
 {
        double smi_num, aperf, cycles, cost = 0.0;
-       int ctx = evsel_context(evsel);
        const char *color = NULL;
 
-       smi_num = runtime_stat_avg(st, STAT_SMI_NUM, ctx, cpu);
-       aperf = runtime_stat_avg(st, STAT_APERF, ctx, cpu);
-       cycles = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+       smi_num = runtime_stat_avg(st, STAT_SMI_NUM, cpu, rsd);
+       aperf = runtime_stat_avg(st, STAT_APERF, cpu, rsd);
+       cycles = runtime_stat_avg(st, STAT_CYCLES, cpu, rsd);
 
        if ((cycles == 0) || (aperf == 0))
                return;
@@ -804,7 +815,8 @@ static int prepare_metric(struct evsel **metric_events,
                        scale = 1e-9;
                } else {
                        v = saved_value_lookup(metric_events[i], cpu, false,
-                                              STAT_NONE, 0, st);
+                                              STAT_NONE, 0, st,
+                                              metric_events[i]->cgrp);
                        if (!v)
                                break;
                        stats = &v->stats;
@@ -930,12 +942,15 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
        print_metric_t print_metric = out->print_metric;
        double total, ratio = 0.0, total2;
        const char *color = NULL;
-       int ctx = evsel_context(evsel);
+       struct runtime_stat_data rsd = {
+               .ctx = evsel_context(evsel),
+               .cgrp = evsel->cgrp,
+       };
        struct metric_event *me;
        int num = 1;
 
        if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
-               total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+               total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd);
 
                if (total) {
                        ratio = avg / total;
@@ -945,12 +960,11 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                        print_metric(config, ctxp, NULL, NULL, "insn per cycle", 0);
                }
 
-               total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT,
-                                        ctx, cpu);
+               total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT, cpu, &rsd);
 
                total = max(total, runtime_stat_avg(st,
                                                    STAT_STALLED_CYCLES_BACK,
-                                                   ctx, cpu));
+                                                   cpu, &rsd));
 
                if (total && avg) {
                        out->new_line(config, ctxp);
@@ -960,8 +974,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                        ratio);
                }
        } else if (evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
-               if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)
-                       print_branch_misses(config, cpu, evsel, avg, out, st);
+               if (runtime_stat_n(st, STAT_BRANCHES, cpu, &rsd) != 0)
+                       print_branch_misses(config, cpu, avg, out, st, &rsd);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all branches", 0);
        } else if (
@@ -970,8 +984,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                         ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
 
-               if (runtime_stat_n(st, STAT_L1_DCACHE, ctx, cpu) != 0)
-                       print_l1_dcache_misses(config, cpu, evsel, avg, out, st);
+               if (runtime_stat_n(st, STAT_L1_DCACHE, cpu, &rsd) != 0)
+                       print_l1_dcache_misses(config, cpu, avg, out, st, &rsd);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all L1-dcache accesses", 0);
        } else if (
@@ -980,8 +994,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                         ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
 
-               if (runtime_stat_n(st, STAT_L1_ICACHE, ctx, cpu) != 0)
-                       print_l1_icache_misses(config, cpu, evsel, avg, out, st);
+               if (runtime_stat_n(st, STAT_L1_ICACHE, cpu, &rsd) != 0)
+                       print_l1_icache_misses(config, cpu, avg, out, st, &rsd);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all L1-icache accesses", 0);
        } else if (
@@ -990,8 +1004,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                         ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
 
-               if (runtime_stat_n(st, STAT_DTLB_CACHE, ctx, cpu) != 0)
-                       print_dtlb_cache_misses(config, cpu, evsel, avg, out, st);
+               if (runtime_stat_n(st, STAT_DTLB_CACHE, cpu, &rsd) != 0)
+                       print_dtlb_cache_misses(config, cpu, avg, out, st, &rsd);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all dTLB cache accesses", 0);
        } else if (
@@ -1000,8 +1014,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                         ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
 
-               if (runtime_stat_n(st, STAT_ITLB_CACHE, ctx, cpu) != 0)
-                       print_itlb_cache_misses(config, cpu, evsel, avg, out, st);
+               if (runtime_stat_n(st, STAT_ITLB_CACHE, cpu, &rsd) != 0)
+                       print_itlb_cache_misses(config, cpu, avg, out, st, &rsd);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all iTLB cache accesses", 0);
        } else if (
@@ -1010,27 +1024,27 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                         ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
 
-               if (runtime_stat_n(st, STAT_LL_CACHE, ctx, cpu) != 0)
-                       print_ll_cache_misses(config, cpu, evsel, avg, out, st);
+               if (runtime_stat_n(st, STAT_LL_CACHE, cpu, &rsd) != 0)
+                       print_ll_cache_misses(config, cpu, avg, out, st, &rsd);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all LL-cache accesses", 0);
        } else if (evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
-               total = runtime_stat_avg(st, STAT_CACHEREFS, ctx, cpu);
+               total = runtime_stat_avg(st, STAT_CACHEREFS, cpu, &rsd);
 
                if (total)
                        ratio = avg * 100 / total;
 
-               if (runtime_stat_n(st, STAT_CACHEREFS, ctx, cpu) != 0)
+               if (runtime_stat_n(st, STAT_CACHEREFS, cpu, &rsd) != 0)
                        print_metric(config, ctxp, NULL, "%8.3f %%",
                                     "of all cache refs", ratio);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all cache refs", 0);
        } else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
-               print_stalled_cycles_frontend(config, cpu, evsel, avg, out, st);
+               print_stalled_cycles_frontend(config, cpu, avg, out, st, &rsd);
        } else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
-               print_stalled_cycles_backend(config, cpu, evsel, avg, out, st);
+               print_stalled_cycles_backend(config, cpu, avg, out, st, &rsd);
        } else if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
-               total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);
+               total = runtime_stat_avg(st, STAT_NSECS, cpu, &rsd);
 
                if (total) {
                        ratio = avg / total;
@@ -1039,7 +1053,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                        print_metric(config, ctxp, NULL, NULL, "Ghz", 0);
                }
        } else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
-               total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+               total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd);
 
                if (total)
                        print_metric(config, ctxp, NULL,
@@ -1049,8 +1063,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                        print_metric(config, ctxp, NULL, NULL, "transactional cycles",
                                     0);
        } else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
-               total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
-               total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, ctx, cpu);
+               total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd);
+               total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu, &rsd);
 
                if (total2 < avg)
                        total2 = avg;
@@ -1060,21 +1074,19 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                else
                        print_metric(config, ctxp, NULL, NULL, "aborted cycles", 0);
        } else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
-               total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
-                                        ctx, cpu);
+               total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu, &rsd);
 
                if (avg)
                        ratio = total / avg;
 
-               if (runtime_stat_n(st, STAT_CYCLES_IN_TX, ctx, cpu) != 0)
+               if (runtime_stat_n(st, STAT_CYCLES_IN_TX, cpu, &rsd) != 0)
                        print_metric(config, ctxp, NULL, "%8.0f",
                                     "cycles / transaction", ratio);
                else
                        print_metric(config, ctxp, NULL, NULL, "cycles / transaction",
                                      0);
        } else if (perf_stat_evsel__is(evsel, ELISION_START)) {
-               total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
-                                        ctx, cpu);
+               total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu, &rsd);
 
                if (avg)
                        ratio = total / avg;
@@ -1087,28 +1099,28 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                else
                        print_metric(config, ctxp, NULL, NULL, "CPUs utilized", 0);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
-               double fe_bound = td_fe_bound(ctx, cpu, st);
+               double fe_bound = td_fe_bound(cpu, st, &rsd);
 
                if (fe_bound > 0.2)
                        color = PERF_COLOR_RED;
                print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
                                fe_bound * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
-               double retiring = td_retiring(ctx, cpu, st);
+               double retiring = td_retiring(cpu, st, &rsd);
 
                if (retiring > 0.7)
                        color = PERF_COLOR_GREEN;
                print_metric(config, ctxp, color, "%8.1f%%", "retiring",
                                retiring * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
-               double bad_spec = td_bad_spec(ctx, cpu, st);
+               double bad_spec = td_bad_spec(cpu, st, &rsd);
 
                if (bad_spec > 0.1)
                        color = PERF_COLOR_RED;
                print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
                                bad_spec * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
-               double be_bound = td_be_bound(ctx, cpu, st);
+               double be_bound = td_be_bound(cpu, st, &rsd);
                const char *name = "backend bound";
                static int have_recovery_bubbles = -1;
 
@@ -1121,43 +1133,43 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
 
                if (be_bound > 0.2)
                        color = PERF_COLOR_RED;
-               if (td_total_slots(ctx, cpu, st) > 0)
+               if (td_total_slots(cpu, st, &rsd) > 0)
                        print_metric(config, ctxp, color, "%8.1f%%", name,
                                        be_bound * 100.);
                else
                        print_metric(config, ctxp, NULL, NULL, name, 0);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_RETIRING) &&
-                       full_td(ctx, cpu, st)) {
-               double retiring = td_metric_ratio(ctx, cpu,
-                                                 STAT_TOPDOWN_RETIRING, st);
-
+                  full_td(cpu, st, &rsd)) {
+               double retiring = td_metric_ratio(cpu,
+                                                 STAT_TOPDOWN_RETIRING, st,
+                                                 &rsd);
                if (retiring > 0.7)
                        color = PERF_COLOR_GREEN;
                print_metric(config, ctxp, color, "%8.1f%%", "retiring",
                                retiring * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_FE_BOUND) &&
-                       full_td(ctx, cpu, st)) {
-               double fe_bound = td_metric_ratio(ctx, cpu,
-                                                 STAT_TOPDOWN_FE_BOUND, st);
-
+                  full_td(cpu, st, &rsd)) {
+               double fe_bound = td_metric_ratio(cpu,
+                                                 STAT_TOPDOWN_FE_BOUND, st,
+                                                 &rsd);
                if (fe_bound > 0.2)
                        color = PERF_COLOR_RED;
                print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
                                fe_bound * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_BE_BOUND) &&
-                       full_td(ctx, cpu, st)) {
-               double be_bound = td_metric_ratio(ctx, cpu,
-                                                 STAT_TOPDOWN_BE_BOUND, st);
-
+                  full_td(cpu, st, &rsd)) {
+               double be_bound = td_metric_ratio(cpu,
+                                                 STAT_TOPDOWN_BE_BOUND, st,
+                                                 &rsd);
                if (be_bound > 0.2)
                        color = PERF_COLOR_RED;
                print_metric(config, ctxp, color, "%8.1f%%", "backend bound",
                                be_bound * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_BAD_SPEC) &&
-                       full_td(ctx, cpu, st)) {
-               double bad_spec = td_metric_ratio(ctx, cpu,
-                                                 STAT_TOPDOWN_BAD_SPEC, st);
-
+                  full_td(cpu, st, &rsd)) {
+               double bad_spec = td_metric_ratio(cpu,
+                                                 STAT_TOPDOWN_BAD_SPEC, st,
+                                                 &rsd);
                if (bad_spec > 0.1)
                        color = PERF_COLOR_RED;
                print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
@@ -1165,11 +1177,11 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
        } else if (evsel->metric_expr) {
                generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL,
                                evsel->name, evsel->metric_name, NULL, 1, cpu, out, st);
-       } else if (runtime_stat_n(st, STAT_NSECS, 0, cpu) != 0) {
+       } else if (runtime_stat_n(st, STAT_NSECS, cpu, &rsd) != 0) {
                char unit = 'M';
                char unit_buf[10];
 
-               total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);
+               total = runtime_stat_avg(st, STAT_NSECS, cpu, &rsd);
 
                if (total)
                        ratio = 1000.0 * avg / total;
@@ -1180,7 +1192,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
                print_metric(config, ctxp, NULL, "%8.3f", unit_buf, ratio);
        } else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
-               print_smi_cost(config, cpu, evsel, out, st);
+               print_smi_cost(config, cpu, out, st, &rsd);
        } else {
                num = 0;
        }
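The whole stat-shadow refactor replaces the loose (ctx, cpu) pair with a runtime_stat_data parameter object that also carries the counter's cgroup, so every saved_value lookup, on both the update and the print side, is keyed per cgroup, and two cgroups counting the same event no longer alias one slot. A generic sketch of the keying change with simplified stand-in types:

    #include <stdint.h>

    struct cgroup;                          /* opaque, as in the tool */

    struct shadow_key {                     /* simplified stand-in */
            int type;
            int ctx;
            int cpu;
            const struct cgroup *cgrp;      /* new: per-cgroup identity */
    };

    static int shadow_key_cmp(const struct shadow_key *a,
                              const struct shadow_key *b)
    {
            if (a->type != b->type)
                    return a->type - b->type;
            if (a->ctx != b->ctx)
                    return a->ctx - b->ctx;
            if (a->cgrp != b->cgrp)         /* compare identity by address */
                    return (uintptr_t)a->cgrp < (uintptr_t)b->cgrp ? -1 : 1;
            return a->cpu - b->cpu;
    }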
index 57c1724b7e5da213cbaa02cca8694ff86ebafdae..698358c9c0d60951eade09cc50e012a00a8eb446 100644 (file)
@@ -198,7 +198,7 @@ class LinuxSourceTree(object):
                return self.validate_config(build_dir)
 
        def run_kernel(self, args=[], build_dir='', timeout=None):
-               args.extend(['mem=1G'])
+               args.extend(['mem=1G', 'console=tty'])
                self._ops.linux_bin(args, timeout, build_dir)
                outfile = get_outfile_path(build_dir)
                subprocess.call(['stty', 'sane'])
index afbab4aeef3c894546e95d44f9d9e539e50ae945..8a917cb4426a05e30f718764485025ffab99ed39 100644 (file)
@@ -77,8 +77,10 @@ TARGETS += zram
 TARGETS_HOTPLUG = cpu-hotplug
 TARGETS_HOTPLUG += memory-hotplug
 
-# User can optionally provide a TARGETS skiplist.
-SKIP_TARGETS ?=
+# User can optionally provide a TARGETS skiplist.  By default we skip
+# BPF since it has cutting-edge build-time dependencies which require
+# more effort to install.
+SKIP_TARGETS ?= bpf
 ifneq ($(SKIP_TARGETS),)
        TMP := $(filter-out $(SKIP_TARGETS), $(TARGETS))
        override TARGETS := $(TMP)
index 1c5556bdd11d16e599677f661a6997cdd7f27538..0dbd594c2747cb0e82f9f27970e0905108069a3a 100644 (file)
@@ -457,7 +457,7 @@ function barf
        mov     x11, x1 // actual data
        mov     x12, x2 // data size
 
-       puts    "Mistatch: PID="
+       puts    "Mismatch: PID="
        mov     x0, x20
        bl      putdec
        puts    ", iteration="
index f95074c9b48b730e3055a70f121a8cd3e11783bc..9210691aa9985601995eef4b8af58da33cd993b9 100644 (file)
@@ -625,7 +625,7 @@ function barf
        mov     x11, x1 // actual data
        mov     x12, x2 // data size
 
-       puts    "Mistatch: PID="
+       puts    "Mismatch: PID="
        mov     x0, x20
        bl      putdec
        puts    ", iteration="
index 8c33e999319a9aa6549f4d91e6e622cd97d945fd..c51df6b91befe50b6740e6f25c05d503885b7c1f 100644 (file)
@@ -121,6 +121,9 @@ VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux)                                \
                     /sys/kernel/btf/vmlinux                            \
                     /boot/vmlinux-$(shell uname -r)
 VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+ifeq ($(VMLINUX_BTF),)
+$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)")
+endif
 
 # Define simple and short `make test_progs`, `make test_sysctl`, etc targets
 # to build individual tests.
index c0fe73a17ed1234ae7517b452364b32e58760771..3bfcf00c0a673771498f586410b062a115bd8220 100644 (file)
@@ -34,61 +34,6 @@ struct storage {
        struct bpf_spin_lock lock;
 };
 
-/* Copies an rm binary to a temp file. dest is a mkstemp template */
-static int copy_rm(char *dest)
-{
-       int fd_in, fd_out = -1, ret = 0;
-       struct stat stat;
-       char *buf = NULL;
-
-       fd_in = open("/bin/rm", O_RDONLY);
-       if (fd_in < 0)
-               return -errno;
-
-       fd_out = mkstemp(dest);
-       if (fd_out < 0) {
-               ret = -errno;
-               goto out;
-       }
-
-       ret = fstat(fd_in, &stat);
-       if (ret == -1) {
-               ret = -errno;
-               goto out;
-       }
-
-       buf = malloc(stat.st_blksize);
-       if (!buf) {
-               ret = -errno;
-               goto out;
-       }
-
-       while (ret = read(fd_in, buf, stat.st_blksize), ret > 0) {
-               ret = write(fd_out, buf, ret);
-               if (ret < 0) {
-                       ret = -errno;
-                       goto out;
-
-               }
-       }
-       if (ret < 0) {
-               ret = -errno;
-               goto out;
-
-       }
-
-       /* Set executable permission on the copied file */
-       ret = chmod(dest, 0100);
-       if (ret == -1)
-               ret = -errno;
-
-out:
-       free(buf);
-       close(fd_in);
-       close(fd_out);
-       return ret;
-}
-
 /* Fork and exec the provided rm binary and return the exit code of the
  * forked process and its pid.
  */
@@ -168,9 +113,11 @@ static bool check_syscall_operations(int map_fd, int obj_fd)
 
 void test_test_local_storage(void)
 {
-       char tmp_exec_path[PATH_MAX] = "/tmp/copy_of_rmXXXXXX";
+       char tmp_dir_path[64] = "/tmp/local_storageXXXXXX";
        int err, serv_sk = -1, task_fd = -1, rm_fd = -1;
        struct local_storage *skel = NULL;
+       char tmp_exec_path[64];
+       char cmd[256];
 
        skel = local_storage__open_and_load();
        if (CHECK(!skel, "skel_load", "lsm skeleton failed\n"))
@@ -189,18 +136,24 @@ void test_test_local_storage(void)
                                      task_fd))
                goto close_prog;
 
-       err = copy_rm(tmp_exec_path);
-       if (CHECK(err < 0, "copy_rm", "err %d errno %d\n", err, errno))
+       if (CHECK(!mkdtemp(tmp_dir_path), "mkdtemp",
+                 "unable to create tmpdir: %d\n", errno))
                goto close_prog;
 
+       snprintf(tmp_exec_path, sizeof(tmp_exec_path), "%s/copy_of_rm",
+                tmp_dir_path);
+       snprintf(cmd, sizeof(cmd), "cp /bin/rm %s", tmp_exec_path);
+       if (CHECK_FAIL(system(cmd)))
+               goto close_prog_rmdir;
+
        rm_fd = open(tmp_exec_path, O_RDONLY);
        if (CHECK(rm_fd < 0, "open", "failed to open %s err:%d, errno:%d",
                  tmp_exec_path, rm_fd, errno))
-               goto close_prog;
+               goto close_prog_rmdir;
 
        if (!check_syscall_operations(bpf_map__fd(skel->maps.inode_storage_map),
                                      rm_fd))
-               goto close_prog;
+               goto close_prog_rmdir;
 
        /* Sets skel->bss->monitored_pid to the pid of the forked child,
         * forks a child process that executes tmp_exec_path and tries to
@@ -209,33 +162,36 @@ void test_test_local_storage(void)
         */
        err = run_self_unlink(&skel->bss->monitored_pid, tmp_exec_path);
        if (CHECK(err != EPERM, "run_self_unlink", "err %d want EPERM\n", err))
-               goto close_prog_unlink;
+               goto close_prog_rmdir;
 
        /* Set the process being monitored to be the current process */
        skel->bss->monitored_pid = getpid();
 
-       /* Remove the temporary created executable */
-       err = unlink(tmp_exec_path);
-       if (CHECK(err != 0, "unlink", "unable to unlink %s: %d", tmp_exec_path,
-                 errno))
-               goto close_prog_unlink;
+       /* Move copy_of_rm to a new location so that it triggers the
+        * inode_rename LSM hook with a new_dentry that has a NULL inode ptr.
+        */
+       snprintf(cmd, sizeof(cmd), "mv %s/copy_of_rm %s/check_null_ptr",
+                tmp_dir_path, tmp_dir_path);
+       if (CHECK_FAIL(system(cmd)))
+               goto close_prog_rmdir;
 
        CHECK(skel->data->inode_storage_result != 0, "inode_storage_result",
              "inode_local_storage not set\n");
 
        serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
        if (CHECK(serv_sk < 0, "start_server", "failed to start server\n"))
-               goto close_prog;
+               goto close_prog_rmdir;
 
        CHECK(skel->data->sk_storage_result != 0, "sk_storage_result",
              "sk_local_storage not set\n");
 
        if (!check_syscall_operations(bpf_map__fd(skel->maps.sk_storage_map),
                                      serv_sk))
-               goto close_prog;
+               goto close_prog_rmdir;
 
-close_prog_unlink:
-       unlink(tmp_exec_path);
+close_prog_rmdir:
+       snprintf(cmd, sizeof(cmd), "rm -rf %s", tmp_dir_path);
+       system(cmd);
 close_prog:
        close(serv_sk);
        close(rm_fd);
index 5bfef2887e70600a25e41c4cd5d2f6eaa360c82a..418d9c6d495258054b9c0f84017c3cf3d95dad34 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright 2020 Google LLC.
  */
 
-#include "vmlinux.h"
+#include <linux/bpf.h>
 #include <errno.h>
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
index 3e3de130f28f303e30a45bff6501e502f905fa93..95868bc7ada9873efcbf9edb16b6bed5f8ec1b4c 100644 (file)
@@ -50,7 +50,6 @@ int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
        __u32 pid = bpf_get_current_pid_tgid() >> 32;
        struct local_storage *storage;
        bool is_self_unlink;
-       int err;
 
        if (pid != monitored_pid)
                return 0;
@@ -66,8 +65,27 @@ int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
                        return -EPERM;
        }
 
-       storage = bpf_inode_storage_get(&inode_storage_map, victim->d_inode, 0,
-                                       BPF_LOCAL_STORAGE_GET_F_CREATE);
+       return 0;
+}
+
+SEC("lsm/inode_rename")
+int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
+            struct inode *new_dir, struct dentry *new_dentry,
+            unsigned int flags)
+{
+       __u32 pid = bpf_get_current_pid_tgid() >> 32;
+       struct local_storage *storage;
+       int err;
+
+       /* new_dentry->d_inode can be NULL when the inode is renamed to a file
+        * that did not exist before. The helper should be able to handle this
+        * NULL pointer.
+        */
+       bpf_inode_storage_get(&inode_storage_map, new_dentry->d_inode, 0,
+                             BPF_LOCAL_STORAGE_GET_F_CREATE);
+
+       storage = bpf_inode_storage_get(&inode_storage_map, old_dentry->d_inode,
+                                       0, 0);
        if (!storage)
                return 0;
 
@@ -76,7 +94,7 @@ int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
                inode_storage_result = -1;
        bpf_spin_unlock(&storage->lock);
 
-       err = bpf_inode_storage_delete(&inode_storage_map, victim->d_inode);
+       err = bpf_inode_storage_delete(&inode_storage_map, old_dentry->d_inode);
        if (!err)
                inode_storage_result = err;
 
@@ -133,37 +151,18 @@ int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
        return 0;
 }
 
-SEC("lsm/file_open")
-int BPF_PROG(file_open, struct file *file)
-{
-       __u32 pid = bpf_get_current_pid_tgid() >> 32;
-       struct local_storage *storage;
-
-       if (pid != monitored_pid)
-               return 0;
-
-       if (!file->f_inode)
-               return 0;
-
-       storage = bpf_inode_storage_get(&inode_storage_map, file->f_inode, 0,
-                                       BPF_LOCAL_STORAGE_GET_F_CREATE);
-       if (!storage)
-               return 0;
-
-       bpf_spin_lock(&storage->lock);
-       storage->value = DUMMY_STORAGE_VALUE;
-       bpf_spin_unlock(&storage->lock);
-       return 0;
-}
-
 /* This uses the local storage to remember the inode of the binary that a
  * process was originally executing.
  */
 SEC("lsm/bprm_committed_creds")
 void BPF_PROG(exec, struct linux_binprm *bprm)
 {
+       __u32 pid = bpf_get_current_pid_tgid() >> 32;
        struct local_storage *storage;
 
+       if (pid != monitored_pid)
+               return;
+
        storage = bpf_task_storage_get(&task_storage_map,
                                       bpf_get_current_task_btf(), 0,
                                       BPF_LOCAL_STORAGE_GET_F_CREATE);
@@ -172,4 +171,13 @@ void BPF_PROG(exec, struct linux_binprm *bprm)
                storage->exec_inode = bprm->file->f_inode;
                bpf_spin_unlock(&storage->lock);
        }
+
+       storage = bpf_inode_storage_get(&inode_storage_map, bprm->file->f_inode,
+                                       0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+       if (!storage)
+               return;
+
+       bpf_spin_lock(&storage->lock);
+       storage->value = DUMMY_STORAGE_VALUE;
+       bpf_spin_unlock(&storage->lock);
 }
index 0ad3e6305ff03ca15d51ba659cae13a3d8fcac1a..51adc42b2b40e0b9516f2b573aa99e652de377a9 100644 (file)
@@ -1312,22 +1312,58 @@ static void test_map_stress(void)
 #define DO_UPDATE 1
 #define DO_DELETE 0
 
+#define MAP_RETRIES 20
+
+static int map_update_retriable(int map_fd, const void *key, const void *value,
+                               int flags, int attempts)
+{
+       while (bpf_map_update_elem(map_fd, key, value, flags)) {
+               if (!attempts || (errno != EAGAIN && errno != EBUSY))
+                       return -errno;
+
+               usleep(1);
+               attempts--;
+       }
+
+       return 0;
+}
+
+static int map_delete_retriable(int map_fd, const void *key, int attempts)
+{
+       while (bpf_map_delete_elem(map_fd, key)) {
+               if (!attempts || (errno != EAGAIN && errno != EBUSY))
+                       return -errno;
+
+               usleep(1);
+               attempts--;
+       }
+
+       return 0;
+}
+
 static void test_update_delete(unsigned int fn, void *data)
 {
        int do_update = ((int *)data)[1];
        int fd = ((int *)data)[0];
-       int i, key, value;
+       int i, key, value, err;
 
        for (i = fn; i < MAP_SIZE; i += TASKS) {
                key = value = i;
 
                if (do_update) {
-                       assert(bpf_map_update_elem(fd, &key, &value,
-                                                  BPF_NOEXIST) == 0);
-                       assert(bpf_map_update_elem(fd, &key, &value,
-                                                  BPF_EXIST) == 0);
+                       err = map_update_retriable(fd, &key, &value, BPF_NOEXIST, MAP_RETRIES);
+                       if (err)
+                               printf("error %d %d\n", err, errno);
+                       assert(err == 0);
+                       err = map_update_retriable(fd, &key, &value, BPF_EXIST, MAP_RETRIES);
+                       if (err)
+                               printf("error %d %d\n", err, errno);
+                       assert(err == 0);
                } else {
-                       assert(bpf_map_delete_elem(fd, &key) == 0);
+                       err = map_delete_retriable(fd, &key, MAP_RETRIES);
+                       if (err)
+                               printf("error %d %d\n", err, errno);
+                       assert(err == 0);
                }
        }
 }
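
The two helpers above absorb transient -EAGAIN/-EBUSY failures that
bpf_map_update_elem()/bpf_map_delete_elem() can return under concurrent
access from the stress-test threads. The same pattern extends naturally to
lookups; a minimal sketch (map_lookup_retriable is hypothetical and not
part of this patch):

	static int map_lookup_retriable(int map_fd, const void *key, void *value,
					int attempts)
	{
		/* Lookups can fail transiently for the same reason; retry
		 * with a short sleep, preserving the final errno on failure.
		 */
		while (bpf_map_lookup_elem(map_fd, key, value)) {
			if (!attempts || (errno != EAGAIN && errno != EBUSY))
				return -errno;

			usleep(1);
			attempts--;
		}

		return 0;
	}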
index 777a81404fdbd15a9643151c108a51db7d4478f3..f8569f04064b7e9684e3a93e1e03ab1e2f13fa7c 100644 (file)
@@ -50,7 +50,7 @@
 #define MAX_INSNS      BPF_MAXINSNS
 #define MAX_TEST_INSNS 1000000
 #define MAX_FIXUPS     8
-#define MAX_NR_MAPS    20
+#define MAX_NR_MAPS    21
 #define MAX_TEST_RUNS  8
 #define POINTER_VALUE  0xcafe4all
 #define TEST_DATA_LEN  64
@@ -87,6 +87,7 @@ struct bpf_test {
        int fixup_sk_storage_map[MAX_FIXUPS];
        int fixup_map_event_output[MAX_FIXUPS];
        int fixup_map_reuseport_array[MAX_FIXUPS];
+       int fixup_map_ringbuf[MAX_FIXUPS];
        const char *errstr;
        const char *errstr_unpriv;
        uint32_t insn_processed;
@@ -640,6 +641,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
        int *fixup_sk_storage_map = test->fixup_sk_storage_map;
        int *fixup_map_event_output = test->fixup_map_event_output;
        int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
+       int *fixup_map_ringbuf = test->fixup_map_ringbuf;
 
        if (test->fill_helper) {
                test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
@@ -817,6 +819,14 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
                        fixup_map_reuseport_array++;
                } while (*fixup_map_reuseport_array);
        }
+       if (*fixup_map_ringbuf) {
+               map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0,
+                                          0, 4096);
+               do {
+                       prog[*fixup_map_ringbuf].imm = map_fds[20];
+                       fixup_map_ringbuf++;
+               } while (*fixup_map_ringbuf);
+       }
 }
 
 struct libcap {
index 45d43bf82f269190bc51ab29f7032e909103dca0..0b943897aaf6c136a8fea40a3804042b28c44e37 100644 (file)
        .result = ACCEPT,
        .result_unpriv = ACCEPT,
 },
+{
+       "check valid spill/fill, ptr to mem",
+       .insns = {
+       /* reserve 8 byte ringbuf memory */
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_MOV64_IMM(BPF_REG_2, 8),
+       BPF_MOV64_IMM(BPF_REG_3, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
+       /* store a pointer to the reserved memory in R6 */
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       /* check whether the reservation was successful */
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+       /* spill R6(mem) into the stack */
+       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+       /* fill it back in R7 */
+       BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
+       /* should be able to access *(R7) = 0 */
+       BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
+       /* submit the reserved ringbuf memory */
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+       BPF_MOV64_IMM(BPF_REG_2, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_ringbuf = { 1 },
+       .result = ACCEPT,
+       .result_unpriv = ACCEPT,
+},
 {
        "check corrupted spill/fill",
        .insns = {
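
For readers more fluent in C than in raw eBPF instructions, the new
spill/fill test corresponds roughly to the libbpf-style sketch below (the
map name "ringbuf" and the tracepoint attach point are illustrative
assumptions; the real test feeds hand-built instructions straight to the
verifier):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_RINGBUF);
		__uint(max_entries, 4096);
	} ringbuf SEC(".maps");

	SEC("tp/syscalls/sys_enter_getpgid")
	int spill_fill_ringbuf_mem(void *ctx)
	{
		/* Reserve 8 bytes of ringbuf memory (a PTR_TO_MEM value). */
		__u64 *mem = bpf_ringbuf_reserve(&ringbuf, 8, 0);

		if (!mem)
			return 0;

		/* The assembly test spills this pointer to the stack and
		 * fills it back into another register; the verifier must
		 * still permit the store and the submit below.
		 */
		*mem = 0;
		bpf_ringbuf_submit(mem, 0);
		return 0;
	}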
index 014dedaa4dd28f342244d4ea6227e3f7aac361fc..1e722ee76b1fca01b4539649b1545d71179897b1 100644 (file)
@@ -715,7 +715,7 @@ static void worker_pkt_dump(void)
                int payload = *((uint32_t *)(pkt_buf[iter]->payload + PKT_HDR_SIZE));
 
                if (payload == EOT) {
-                       ksft_print_msg("End-of-tranmission frame received\n");
+                       ksft_print_msg("End-of-transmission frame received\n");
                        fprintf(stdout, "---------------------------------------\n");
                        break;
                }
@@ -747,7 +747,7 @@ static void worker_pkt_validate(void)
                        }
 
                        if (payloadseqnum == EOT) {
-                               ksft_print_msg("End-of-tranmission frame received: PASS\n");
+                               ksft_print_msg("End-of-transmission frame received: PASS\n");
                                sigvar = 1;
                                break;
                        }
index 4d900bc1f76c6bc78a006899bfa2bd62ad60aced..5c7700212f75377f0f82444194fbcd0801f717ef 100755 (executable)
@@ -230,7 +230,7 @@ switch_create()
        __mlnx_qos -i $swp4 --pfc=0,1,0,0,0,0,0,0 >/dev/null
        # PG0 will get autoconfigured to Xoff, give PG1 arbitrarily 100K, which
        # is (-2*MTU) about 80K of delay provision.
-       __mlnx_qos -i $swp3 --buffer_size=0,$_100KB,0,0,0,0,0,0 >/dev/null
+       __mlnx_qos -i $swp4 --buffer_size=0,$_100KB,0,0,0,0,0,0 >/dev/null
 
        # bridges
        # -------
index c7ca4faba2721685298f51dff3426ff3dd4f5945..fe41c6a0fa67d049601c565ccd56cc47710565ce 100644 (file)
@@ -33,7 +33,7 @@ ifeq ($(ARCH),s390)
        UNAME_M := s390x
 endif
 
-LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c
+LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
 LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
 LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
 LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c
index 3d96a7bfaff30f6ed7d39475cf0d1b4637ba1739..cdad1eca72f74d0745ffe8c9cee9d723dc2fdcc3 100644 (file)
@@ -7,23 +7,20 @@
  * Copyright (C) 2019, Google, Inc.
  */
 
-#define _GNU_SOURCE /* for program_invocation_name */
+#define _GNU_SOURCE /* for pipe2 */
 
 #include <stdio.h>
 #include <stdlib.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-#include <asm/unistd.h>
 #include <time.h>
 #include <poll.h>
 #include <pthread.h>
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
 #include <linux/userfaultfd.h>
+#include <sys/syscall.h>
 
-#include "perf_test_util.h"
-#include "processor.h"
+#include "kvm_util.h"
 #include "test_util.h"
+#include "perf_test_util.h"
+#include "guest_modes.h"
 
 #ifdef __NR_userfaultfd
 
 #define PER_VCPU_DEBUG(...) _no_printf(__VA_ARGS__)
 #endif
 
+static int nr_vcpus = 1;
+static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
 static char *guest_data_prototype;
 
 static void *vcpu_worker(void *data)
 {
        int ret;
-       struct vcpu_args *vcpu_args = (struct vcpu_args *)data;
+       struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data;
        int vcpu_id = vcpu_args->vcpu_id;
        struct kvm_vm *vm = perf_test_args.vm;
        struct kvm_run *run;
@@ -248,9 +247,14 @@ static int setup_demand_paging(struct kvm_vm *vm,
        return 0;
 }
 
-static void run_test(enum vm_guest_mode mode, bool use_uffd,
-                    useconds_t uffd_delay)
+struct test_params {
+       bool use_uffd;
+       useconds_t uffd_delay;
+};
+
+static void run_test(enum vm_guest_mode mode, void *arg)
 {
+       struct test_params *p = arg;
        pthread_t *vcpu_threads;
        pthread_t *uffd_handler_threads = NULL;
        struct uffd_handler_args *uffd_args = NULL;
@@ -261,7 +265,7 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
        int vcpu_id;
        int r;
 
-       vm = create_vm(mode, nr_vcpus, guest_percpu_mem_size);
+       vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size);
 
        perf_test_args.wr_fract = 1;
 
@@ -273,9 +277,9 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
        vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
        TEST_ASSERT(vcpu_threads, "Memory allocation failed");
 
-       add_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
+       perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
 
-       if (use_uffd) {
+       if (p->use_uffd) {
                uffd_handler_threads =
                        malloc(nr_vcpus * sizeof(*uffd_handler_threads));
                TEST_ASSERT(uffd_handler_threads, "Memory allocation failed");
@@ -308,7 +312,7 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
                        r = setup_demand_paging(vm,
                                                &uffd_handler_threads[vcpu_id],
                                                pipefds[vcpu_id * 2],
-                                               uffd_delay, &uffd_args[vcpu_id],
+                                               p->uffd_delay, &uffd_args[vcpu_id],
                                                vcpu_hva, guest_percpu_mem_size);
                        if (r < 0)
                                exit(-r);
@@ -339,7 +343,7 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
 
        pr_info("All vCPU threads joined\n");
 
-       if (use_uffd) {
+       if (p->use_uffd) {
                char c;
 
                /* Tell the user fault fd handler threads to quit */
@@ -357,43 +361,23 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
                perf_test_args.vcpu_args[0].pages * nr_vcpus /
                ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 1000000000.0));
 
-       ucall_uninit(vm);
-       kvm_vm_free(vm);
+       perf_test_destroy_vm(vm);
 
        free(guest_data_prototype);
        free(vcpu_threads);
-       if (use_uffd) {
+       if (p->use_uffd) {
                free(uffd_handler_threads);
                free(uffd_args);
                free(pipefds);
        }
 }
 
-struct guest_mode {
-       bool supported;
-       bool enabled;
-};
-static struct guest_mode guest_modes[NUM_VM_MODES];
-
-#define guest_mode_init(mode, supported, enabled) ({ \
-       guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
-})
-
 static void help(char *name)
 {
-       int i;
-
        puts("");
        printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n"
               "          [-b memory] [-v vcpus]\n", name);
-       printf(" -m: specify the guest mode ID to test\n"
-              "     (default: test all supported modes)\n"
-              "     This option may be used multiple times.\n"
-              "     Guest mode IDs:\n");
-       for (i = 0; i < NUM_VM_MODES; ++i) {
-               printf("         %d:    %s%s\n", i, vm_guest_mode_string(i),
-                      guest_modes[i].supported ? " (supported)" : "");
-       }
+       guest_modes_help();
        printf(" -u: use User Fault FD to handle vCPU page\n"
               "     faults.\n");
        printf(" -d: add a delay in usec to the User Fault\n"
@@ -410,53 +394,22 @@ static void help(char *name)
 int main(int argc, char *argv[])
 {
        int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
-       bool mode_selected = false;
-       unsigned int mode;
-       int opt, i;
-       bool use_uffd = false;
-       useconds_t uffd_delay = 0;
-
-#ifdef __x86_64__
-       guest_mode_init(VM_MODE_PXXV48_4K, true, true);
-#endif
-#ifdef __aarch64__
-       guest_mode_init(VM_MODE_P40V48_4K, true, true);
-       guest_mode_init(VM_MODE_P40V48_64K, true, true);
-       {
-               unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
-
-               if (limit >= 52)
-                       guest_mode_init(VM_MODE_P52V48_64K, true, true);
-               if (limit >= 48) {
-                       guest_mode_init(VM_MODE_P48V48_4K, true, true);
-                       guest_mode_init(VM_MODE_P48V48_64K, true, true);
-               }
-       }
-#endif
-#ifdef __s390x__
-       guest_mode_init(VM_MODE_P40V48_4K, true, true);
-#endif
+       struct test_params p = {};
+       int opt;
+
+       guest_modes_append_default();
 
        while ((opt = getopt(argc, argv, "hm:ud:b:v:")) != -1) {
                switch (opt) {
                case 'm':
-                       if (!mode_selected) {
-                               for (i = 0; i < NUM_VM_MODES; ++i)
-                                       guest_modes[i].enabled = false;
-                               mode_selected = true;
-                       }
-                       mode = strtoul(optarg, NULL, 10);
-                       TEST_ASSERT(mode < NUM_VM_MODES,
-                                   "Guest mode ID %d too big", mode);
-                       guest_modes[mode].enabled = true;
+                       guest_modes_cmdline(optarg);
                        break;
                case 'u':
-                       use_uffd = true;
+                       p.use_uffd = true;
                        break;
                case 'd':
-                       uffd_delay = strtoul(optarg, NULL, 0);
-                       TEST_ASSERT(uffd_delay >= 0,
-                                   "A negative UFFD delay is not supported.");
+                       p.uffd_delay = strtoul(optarg, NULL, 0);
+                       TEST_ASSERT(p.uffd_delay >= 0, "A negative UFFD delay is not supported.");
                        break;
                case 'b':
                        guest_percpu_mem_size = parse_size(optarg);
@@ -473,14 +426,7 @@ int main(int argc, char *argv[])
                }
        }
 
-       for (i = 0; i < NUM_VM_MODES; ++i) {
-               if (!guest_modes[i].enabled)
-                       continue;
-               TEST_ASSERT(guest_modes[i].supported,
-                           "Guest mode ID %d (%s) not supported.",
-                           i, vm_guest_mode_string(i));
-               run_test(i, use_uffd, uffd_delay);
-       }
+       for_each_guest_mode(run_test, &p);
 
        return 0;
 }
index 9c6a7be31e033280f2e0e347f1d9bda46aff4ac3..2283a0ec74a973cd7ee188f576420c3cb441c886 100644 (file)
@@ -8,29 +8,28 @@
  * Copyright (C) 2020, Google, Inc.
  */
 
-#define _GNU_SOURCE /* for program_invocation_name */
-
 #include <stdio.h>
 #include <stdlib.h>
-#include <unistd.h>
 #include <time.h>
 #include <pthread.h>
 #include <linux/bitmap.h>
-#include <linux/bitops.h>
 
 #include "kvm_util.h"
-#include "perf_test_util.h"
-#include "processor.h"
 #include "test_util.h"
+#include "perf_test_util.h"
+#include "guest_modes.h"
 
 /* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
 #define TEST_HOST_LOOP_N               2UL
 
+static int nr_vcpus = 1;
+static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+
 /* Host variables */
 static u64 dirty_log_manual_caps;
 static bool host_quit;
 static uint64_t iteration;
-static uint64_t vcpu_last_completed_iteration[MAX_VCPUS];
+static uint64_t vcpu_last_completed_iteration[KVM_MAX_VCPUS];
 
 static void *vcpu_worker(void *data)
 {
@@ -42,7 +41,7 @@ static void *vcpu_worker(void *data)
        struct timespec ts_diff;
        struct timespec total = (struct timespec){0};
        struct timespec avg;
-       struct vcpu_args *vcpu_args = (struct vcpu_args *)data;
+       struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data;
        int vcpu_id = vcpu_args->vcpu_id;
 
        vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
@@ -89,9 +88,15 @@ static void *vcpu_worker(void *data)
        return NULL;
 }
 
-static void run_test(enum vm_guest_mode mode, unsigned long iterations,
-                    uint64_t phys_offset, int wr_fract)
+struct test_params {
+       unsigned long iterations;
+       uint64_t phys_offset;
+       int wr_fract;
+};
+
+static void run_test(enum vm_guest_mode mode, void *arg)
 {
+       struct test_params *p = arg;
        pthread_t *vcpu_threads;
        struct kvm_vm *vm;
        unsigned long *bmap;
@@ -106,9 +111,9 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
        struct kvm_enable_cap cap = {};
        struct timespec clear_dirty_log_total = (struct timespec){0};
 
-       vm = create_vm(mode, nr_vcpus, guest_percpu_mem_size);
+       vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size);
 
-       perf_test_args.wr_fract = wr_fract;
+       perf_test_args.wr_fract = p->wr_fract;
 
        guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
        guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
@@ -124,7 +129,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
        vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
        TEST_ASSERT(vcpu_threads, "Memory allocation failed");
 
-       add_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
+       perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
 
        sync_global_to_guest(vm, perf_test_args);
 
@@ -150,13 +155,13 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 
        /* Enable dirty logging */
        clock_gettime(CLOCK_MONOTONIC, &start);
-       vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX,
+       vm_mem_region_set_flags(vm, PERF_TEST_MEM_SLOT_INDEX,
                                KVM_MEM_LOG_DIRTY_PAGES);
        ts_diff = timespec_diff_now(start);
        pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
                ts_diff.tv_sec, ts_diff.tv_nsec);
 
-       while (iteration < iterations) {
+       while (iteration < p->iterations) {
                /*
                 * Incrementing the iteration number will start the vCPUs
                 * dirtying memory again.
@@ -177,7 +182,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
                        iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
 
                clock_gettime(CLOCK_MONOTONIC, &start);
-               kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
+               kvm_vm_get_dirty_log(vm, PERF_TEST_MEM_SLOT_INDEX, bmap);
 
                ts_diff = timespec_diff_now(start);
                get_dirty_log_total = timespec_add(get_dirty_log_total,
@@ -187,7 +192,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 
                if (dirty_log_manual_caps) {
                        clock_gettime(CLOCK_MONOTONIC, &start);
-                       kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
+                       kvm_vm_clear_dirty_log(vm, PERF_TEST_MEM_SLOT_INDEX, bmap, 0,
                                               host_num_pages);
 
                        ts_diff = timespec_diff_now(start);
@@ -205,43 +210,30 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 
        /* Disable dirty logging */
        clock_gettime(CLOCK_MONOTONIC, &start);
-       vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX, 0);
+       vm_mem_region_set_flags(vm, PERF_TEST_MEM_SLOT_INDEX, 0);
        ts_diff = timespec_diff_now(start);
        pr_info("Disabling dirty logging time: %ld.%.9lds\n",
                ts_diff.tv_sec, ts_diff.tv_nsec);
 
-       avg = timespec_div(get_dirty_log_total, iterations);
+       avg = timespec_div(get_dirty_log_total, p->iterations);
        pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
-               iterations, get_dirty_log_total.tv_sec,
+               p->iterations, get_dirty_log_total.tv_sec,
                get_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
 
        if (dirty_log_manual_caps) {
-               avg = timespec_div(clear_dirty_log_total, iterations);
+               avg = timespec_div(clear_dirty_log_total, p->iterations);
                pr_info("Clear dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
-                       iterations, clear_dirty_log_total.tv_sec,
+                       p->iterations, clear_dirty_log_total.tv_sec,
                        clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
        }
 
        free(bmap);
        free(vcpu_threads);
-       ucall_uninit(vm);
-       kvm_vm_free(vm);
+       perf_test_destroy_vm(vm);
 }
 
-struct guest_mode {
-       bool supported;
-       bool enabled;
-};
-static struct guest_mode guest_modes[NUM_VM_MODES];
-
-#define guest_mode_init(mode, supported, enabled) ({ \
-       guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
-})
-
 static void help(char *name)
 {
-       int i;
-
        puts("");
        printf("usage: %s [-h] [-i iterations] [-p offset] "
               "[-m mode] [-b vcpu bytes] [-v vcpus]\n", name);
@@ -250,14 +242,7 @@ static void help(char *name)
               TEST_HOST_LOOP_N);
        printf(" -p: specify guest physical test memory offset\n"
               "     Warning: a low offset can conflict with the loaded test code.\n");
-       printf(" -m: specify the guest mode ID to test "
-              "(default: test all supported modes)\n"
-              "     This option may be used multiple times.\n"
-              "     Guest mode IDs:\n");
-       for (i = 0; i < NUM_VM_MODES; ++i) {
-               printf("         %d:    %s%s\n", i, vm_guest_mode_string(i),
-                      guest_modes[i].supported ? " (supported)" : "");
-       }
+       guest_modes_help();
        printf(" -b: specify the size of the memory region which should be\n"
               "     dirtied by each vCPU. e.g. 10M or 3G.\n"
               "     (default: 1G)\n");
@@ -272,74 +257,43 @@ static void help(char *name)
 
 int main(int argc, char *argv[])
 {
-       unsigned long iterations = TEST_HOST_LOOP_N;
-       bool mode_selected = false;
-       uint64_t phys_offset = 0;
-       unsigned int mode;
-       int opt, i;
-       int wr_fract = 1;
+       int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
+       struct test_params p = {
+               .iterations = TEST_HOST_LOOP_N,
+               .wr_fract = 1,
+       };
+       int opt;
 
        dirty_log_manual_caps =
                kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
        dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
                                  KVM_DIRTY_LOG_INITIALLY_SET);
 
-#ifdef __x86_64__
-       guest_mode_init(VM_MODE_PXXV48_4K, true, true);
-#endif
-#ifdef __aarch64__
-       guest_mode_init(VM_MODE_P40V48_4K, true, true);
-       guest_mode_init(VM_MODE_P40V48_64K, true, true);
-
-       {
-               unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
-
-               if (limit >= 52)
-                       guest_mode_init(VM_MODE_P52V48_64K, true, true);
-               if (limit >= 48) {
-                       guest_mode_init(VM_MODE_P48V48_4K, true, true);
-                       guest_mode_init(VM_MODE_P48V48_64K, true, true);
-               }
-       }
-#endif
-#ifdef __s390x__
-       guest_mode_init(VM_MODE_P40V48_4K, true, true);
-#endif
+       guest_modes_append_default();
 
        while ((opt = getopt(argc, argv, "hi:p:m:b:f:v:")) != -1) {
                switch (opt) {
                case 'i':
-                       iterations = strtol(optarg, NULL, 10);
+                       p.iterations = strtol(optarg, NULL, 10);
                        break;
                case 'p':
-                       phys_offset = strtoull(optarg, NULL, 0);
+                       p.phys_offset = strtoull(optarg, NULL, 0);
                        break;
                case 'm':
-                       if (!mode_selected) {
-                               for (i = 0; i < NUM_VM_MODES; ++i)
-                                       guest_modes[i].enabled = false;
-                               mode_selected = true;
-                       }
-                       mode = strtoul(optarg, NULL, 10);
-                       TEST_ASSERT(mode < NUM_VM_MODES,
-                                   "Guest mode ID %d too big", mode);
-                       guest_modes[mode].enabled = true;
+                       guest_modes_cmdline(optarg);
                        break;
                case 'b':
                        guest_percpu_mem_size = parse_size(optarg);
                        break;
                case 'f':
-                       wr_fract = atoi(optarg);
-                       TEST_ASSERT(wr_fract >= 1,
+                       p.wr_fract = atoi(optarg);
+                       TEST_ASSERT(p.wr_fract >= 1,
                                    "Write fraction cannot be less than one");
                        break;
                case 'v':
                        nr_vcpus = atoi(optarg);
-                       TEST_ASSERT(nr_vcpus > 0,
-                                   "Must have a positive number of vCPUs");
-                       TEST_ASSERT(nr_vcpus <= MAX_VCPUS,
-                                   "This test does not currently support\n"
-                                   "more than %d vCPUs.", MAX_VCPUS);
+                       TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
+                                   "Invalid number of vcpus, must be between 1 and %d", max_vcpus);
                        break;
                case 'h':
                default:
@@ -348,18 +302,11 @@ int main(int argc, char *argv[])
                }
        }
 
-       TEST_ASSERT(iterations >= 2, "The test should have at least two iterations");
+       TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");
 
-       pr_info("Test iterations: %"PRIu64"\n", iterations);
+       pr_info("Test iterations: %"PRIu64"\n", p.iterations);
 
-       for (i = 0; i < NUM_VM_MODES; ++i) {
-               if (!guest_modes[i].enabled)
-                       continue;
-               TEST_ASSERT(guest_modes[i].supported,
-                           "Guest mode ID %d (%s) not supported.",
-                           i, vm_guest_mode_string(i));
-               run_test(i, iterations, phys_offset, wr_fract);
-       }
+       for_each_guest_mode(run_test, &p);
 
        return 0;
 }
index 471baecb7772a86f4f801fcb58206877736ae396..bb2752d78fe3a8b96b13d08f12e7bc3aa2ec1d3a 100644 (file)
@@ -9,8 +9,6 @@
 
 #include <stdio.h>
 #include <stdlib.h>
-#include <unistd.h>
-#include <time.h>
 #include <pthread.h>
 #include <semaphore.h>
 #include <sys/types.h>
@@ -20,8 +18,9 @@
 #include <linux/bitops.h>
 #include <asm/barrier.h>
 
-#include "test_util.h"
 #include "kvm_util.h"
+#include "test_util.h"
+#include "guest_modes.h"
 #include "processor.h"
 
 #define VCPU_ID                                1
@@ -673,9 +672,15 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
 #define DIRTY_MEM_BITS 30 /* 1G */
 #define PAGE_SHIFT_4K  12
 
-static void run_test(enum vm_guest_mode mode, unsigned long iterations,
-                    unsigned long interval, uint64_t phys_offset)
+struct test_params {
+       unsigned long iterations;
+       unsigned long interval;
+       uint64_t phys_offset;
+};
+
+static void run_test(enum vm_guest_mode mode, void *arg)
 {
+       struct test_params *p = arg;
        struct kvm_vm *vm;
        unsigned long *bmap;
 
@@ -709,12 +714,12 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
        host_page_size = getpagesize();
        host_num_pages = vm_num_host_pages(mode, guest_num_pages);
 
-       if (!phys_offset) {
+       if (!p->phys_offset) {
                guest_test_phys_mem = (vm_get_max_gfn(vm) -
                                       guest_num_pages) * guest_page_size;
                guest_test_phys_mem &= ~(host_page_size - 1);
        } else {
-               guest_test_phys_mem = phys_offset;
+               guest_test_phys_mem = p->phys_offset;
        }
 
 #ifdef __s390x__
@@ -758,9 +763,9 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 
        pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);
 
-       while (iteration < iterations) {
+       while (iteration < p->iterations) {
                /* Give the vcpu thread some time to dirty some pages */
-               usleep(interval * 1000);
+               usleep(p->interval * 1000);
                log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
                                             bmap, host_num_pages);
                vm_dirty_log_verify(mode, bmap);
@@ -783,20 +788,8 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
        kvm_vm_free(vm);
 }
 
-struct guest_mode {
-       bool supported;
-       bool enabled;
-};
-static struct guest_mode guest_modes[NUM_VM_MODES];
-
-#define guest_mode_init(mode, supported, enabled) ({ \
-       guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
-})
-
 static void help(char *name)
 {
-       int i;
-
        puts("");
        printf("usage: %s [-h] [-i iterations] [-I interval] "
               "[-p offset] [-m mode]\n", name);
@@ -813,51 +806,23 @@ static void help(char *name)
        printf(" -M: specify the host logging mode "
               "(default: run all log modes).  Supported modes: \n\t");
        log_modes_dump();
-       printf(" -m: specify the guest mode ID to test "
-              "(default: test all supported modes)\n"
-              "     This option may be used multiple times.\n"
-              "     Guest mode IDs:\n");
-       for (i = 0; i < NUM_VM_MODES; ++i) {
-               printf("         %d:    %s%s\n", i, vm_guest_mode_string(i),
-                      guest_modes[i].supported ? " (supported)" : "");
-       }
+       guest_modes_help();
        puts("");
        exit(0);
 }
 
 int main(int argc, char *argv[])
 {
-       unsigned long iterations = TEST_HOST_LOOP_N;
-       unsigned long interval = TEST_HOST_LOOP_INTERVAL;
-       bool mode_selected = false;
-       uint64_t phys_offset = 0;
-       unsigned int mode;
-       int opt, i, j;
+       struct test_params p = {
+               .iterations = TEST_HOST_LOOP_N,
+               .interval = TEST_HOST_LOOP_INTERVAL,
+       };
+       int opt, i;
 
        sem_init(&dirty_ring_vcpu_stop, 0, 0);
        sem_init(&dirty_ring_vcpu_cont, 0, 0);
 
-#ifdef __x86_64__
-       guest_mode_init(VM_MODE_PXXV48_4K, true, true);
-#endif
-#ifdef __aarch64__
-       guest_mode_init(VM_MODE_P40V48_4K, true, true);
-       guest_mode_init(VM_MODE_P40V48_64K, true, true);
-
-       {
-               unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
-
-               if (limit >= 52)
-                       guest_mode_init(VM_MODE_P52V48_64K, true, true);
-               if (limit >= 48) {
-                       guest_mode_init(VM_MODE_P48V48_4K, true, true);
-                       guest_mode_init(VM_MODE_P48V48_64K, true, true);
-               }
-       }
-#endif
-#ifdef __s390x__
-       guest_mode_init(VM_MODE_P40V48_4K, true, true);
-#endif
+       guest_modes_append_default();
 
        while ((opt = getopt(argc, argv, "c:hi:I:p:m:M:")) != -1) {
                switch (opt) {
@@ -865,24 +830,16 @@ int main(int argc, char *argv[])
                        test_dirty_ring_count = strtol(optarg, NULL, 10);
                        break;
                case 'i':
-                       iterations = strtol(optarg, NULL, 10);
+                       p.iterations = strtol(optarg, NULL, 10);
                        break;
                case 'I':
-                       interval = strtol(optarg, NULL, 10);
+                       p.interval = strtol(optarg, NULL, 10);
                        break;
                case 'p':
-                       phys_offset = strtoull(optarg, NULL, 0);
+                       p.phys_offset = strtoull(optarg, NULL, 0);
                        break;
                case 'm':
-                       if (!mode_selected) {
-                               for (i = 0; i < NUM_VM_MODES; ++i)
-                                       guest_modes[i].enabled = false;
-                               mode_selected = true;
-                       }
-                       mode = strtoul(optarg, NULL, 10);
-                       TEST_ASSERT(mode < NUM_VM_MODES,
-                                   "Guest mode ID %d too big", mode);
-                       guest_modes[mode].enabled = true;
+                       guest_modes_cmdline(optarg);
                        break;
                case 'M':
                        if (!strcmp(optarg, "all")) {
@@ -911,32 +868,24 @@ int main(int argc, char *argv[])
                }
        }
 
-       TEST_ASSERT(iterations > 2, "Iterations must be greater than two");
-       TEST_ASSERT(interval > 0, "Interval must be greater than zero");
+       TEST_ASSERT(p.iterations > 2, "Iterations must be greater than two");
+       TEST_ASSERT(p.interval > 0, "Interval must be greater than zero");
 
        pr_info("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
-               iterations, interval);
+               p.iterations, p.interval);
 
        srandom(time(0));
 
-       for (i = 0; i < NUM_VM_MODES; ++i) {
-               if (!guest_modes[i].enabled)
-                       continue;
-               TEST_ASSERT(guest_modes[i].supported,
-                           "Guest mode ID %d (%s) not supported.",
-                           i, vm_guest_mode_string(i));
-               if (host_log_mode_option == LOG_MODE_ALL) {
-                       /* Run each log mode */
-                       for (j = 0; j < LOG_MODE_NUM; j++) {
-                               pr_info("Testing Log Mode '%s'\n",
-                                       log_modes[j].name);
-                               host_log_mode = j;
-                               run_test(i, iterations, interval, phys_offset);
-                       }
-               } else {
-                       host_log_mode = host_log_mode_option;
-                       run_test(i, iterations, interval, phys_offset);
+       if (host_log_mode_option == LOG_MODE_ALL) {
+               /* Run each log mode */
+               for (i = 0; i < LOG_MODE_NUM; i++) {
+                       pr_info("Testing Log Mode '%s'\n", log_modes[i].name);
+                       host_log_mode = i;
+                       for_each_guest_mode(run_test, &p);
                }
+       } else {
+               host_log_mode = host_log_mode_option;
+               for_each_guest_mode(run_test, &p);
        }
 
        return 0;
diff --git a/tools/testing/selftests/kvm/include/guest_modes.h b/tools/testing/selftests/kvm/include/guest_modes.h
new file mode 100644 (file)
index 0000000..b691df3
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020, Red Hat, Inc.
+ */
+#include "kvm_util.h"
+
+struct guest_mode {
+       bool supported;
+       bool enabled;
+};
+
+extern struct guest_mode guest_modes[NUM_VM_MODES];
+
+#define guest_mode_append(mode, supported, enabled) ({ \
+       guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
+})
+
+void guest_modes_append_default(void);
+void for_each_guest_mode(void (*func)(enum vm_guest_mode, void *), void *arg);
+void guest_modes_help(void);
+void guest_modes_cmdline(const char *arg);
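
Taken together, the new header reduces per-test guest-mode handling to a
pattern like the sketch below (struct test_params and run_test() stand in
for a test's own code):

	#include <stdlib.h>
	#include <unistd.h>

	#include "guest_modes.h"

	struct test_params {
		int dummy;	/* placeholder for per-test options */
	};

	static void run_test(enum vm_guest_mode mode, void *arg)
	{
		struct test_params *p = arg;

		/* create a VM for "mode" and run the scenario in "p" */
	}

	int main(int argc, char *argv[])
	{
		struct test_params p = {};
		int opt;

		guest_modes_append_default();

		while ((opt = getopt(argc, argv, "m:")) != -1) {
			switch (opt) {
			case 'm':
				guest_modes_cmdline(optarg);
				break;
			default:
				guest_modes_help();
				exit(0);
			}
		}

		for_each_guest_mode(run_test, &p);
		return 0;
	}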
index dfa9d369e8fc627587f0e286b1a840b78eddc4d1..5cbb861525edfb312bc3632894f6af862b41c7c8 100644 (file)
@@ -16,6 +16,7 @@
 
 #include "sparsebit.h"
 
+#define KVM_MAX_VCPUS 512
 
 /*
  * Callers of kvm_util only have an incomplete/opaque description of the
@@ -70,6 +71,14 @@ enum vm_guest_mode {
 #define vm_guest_mode_string(m) vm_guest_mode_string[m]
 extern const char * const vm_guest_mode_string[];
 
+struct vm_guest_mode_params {
+       unsigned int pa_bits;
+       unsigned int va_bits;
+       unsigned int page_size;
+       unsigned int page_shift;
+};
+extern const struct vm_guest_mode_params vm_guest_mode_params[];
+
 enum vm_mem_backing_src_type {
        VM_MEM_SRC_ANONYMOUS,
        VM_MEM_SRC_ANONYMOUS_THP,
index 239421e4f6b813088ecc2231c2d4a9296a7ee3d2..b1188823c31b776eea04884e29b18de0fab9d328 100644 (file)
@@ -9,38 +9,15 @@
 #define SELFTEST_KVM_PERF_TEST_UTIL_H
 
 #include "kvm_util.h"
-#include "processor.h"
-
-#define MAX_VCPUS 512
-
-#define PAGE_SHIFT_4K  12
-#define PTES_PER_4K_PT 512
-
-#define TEST_MEM_SLOT_INDEX            1
 
 /* Default guest test virtual memory offset */
 #define DEFAULT_GUEST_TEST_MEM         0xc0000000
 
 #define DEFAULT_PER_VCPU_MEM_SIZE      (1 << 30) /* 1G */
 
-/*
- * Guest physical memory offset of the testing memory slot.
- * This will be set to the topmost valid physical address minus
- * the test memory size.
- */
-static uint64_t guest_test_phys_mem;
-
-/*
- * Guest virtual memory offset of the testing memory slot.
- * Must not conflict with identity mapped test code.
- */
-static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
-static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
-
-/* Number of VCPUs for the test */
-static int nr_vcpus = 1;
+#define PERF_TEST_MEM_SLOT_INDEX       1
 
-struct vcpu_args {
+struct perf_test_vcpu_args {
        uint64_t gva;
        uint64_t pages;
 
@@ -54,141 +31,21 @@ struct perf_test_args {
        uint64_t guest_page_size;
        int wr_fract;
 
-       struct vcpu_args vcpu_args[MAX_VCPUS];
+       struct perf_test_vcpu_args vcpu_args[KVM_MAX_VCPUS];
 };
 
-static struct perf_test_args perf_test_args;
+extern struct perf_test_args perf_test_args;
 
 /*
- * Continuously write to the first 8 bytes of each page in the
- * specified region.
+ * Guest physical memory offset of the testing memory slot.
+ * This will be set to the topmost valid physical address minus
+ * the test memory size.
  */
-static void guest_code(uint32_t vcpu_id)
-{
-       struct vcpu_args *vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
-       uint64_t gva;
-       uint64_t pages;
-       int i;
-
-       /* Make sure vCPU args data structure is not corrupt. */
-       GUEST_ASSERT(vcpu_args->vcpu_id == vcpu_id);
-
-       gva = vcpu_args->gva;
-       pages = vcpu_args->pages;
-
-       while (true) {
-               for (i = 0; i < pages; i++) {
-                       uint64_t addr = gva + (i * perf_test_args.guest_page_size);
-
-                       if (i % perf_test_args.wr_fract == 0)
-                               *(uint64_t *)addr = 0x0123456789ABCDEF;
-                       else
-                               READ_ONCE(*(uint64_t *)addr);
-               }
-
-               GUEST_SYNC(1);
-       }
-}
-
-static struct kvm_vm *create_vm(enum vm_guest_mode mode, int vcpus,
-                               uint64_t vcpu_memory_bytes)
-{
-       struct kvm_vm *vm;
-       uint64_t pages = DEFAULT_GUEST_PHY_PAGES;
-       uint64_t guest_num_pages;
-
-       /* Account for a few pages per-vCPU for stacks */
-       pages += DEFAULT_STACK_PGS * vcpus;
-
-       /*
-        * Reserve twice the ammount of memory needed to map the test region and
-        * the page table / stacks region, at 4k, for page tables. Do the
-        * calculation with 4K page size: the smallest of all archs. (e.g., 64K
-        * page size guest will need even less memory for page tables).
-        */
-       pages += (2 * pages) / PTES_PER_4K_PT;
-       pages += ((2 * vcpus * vcpu_memory_bytes) >> PAGE_SHIFT_4K) /
-                PTES_PER_4K_PT;
-       pages = vm_adjust_num_guest_pages(mode, pages);
-
-       pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
-
-       vm = vm_create(mode, pages, O_RDWR);
-       kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
-#ifdef __x86_64__
-       vm_create_irqchip(vm);
-#endif
-
-       perf_test_args.vm = vm;
-       perf_test_args.guest_page_size = vm_get_page_size(vm);
-       perf_test_args.host_page_size = getpagesize();
-
-       TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0,
-                   "Guest memory size is not guest page size aligned.");
-
-       guest_num_pages = (vcpus * vcpu_memory_bytes) /
-                         perf_test_args.guest_page_size;
-       guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
-
-       /*
-        * If there should be more memory in the guest test region than there
-        * can be pages in the guest, it will definitely cause problems.
-        */
-       TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
-                   "Requested more guest memory than address space allows.\n"
-                   "    guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n",
-                   guest_num_pages, vm_get_max_gfn(vm), vcpus,
-                   vcpu_memory_bytes);
-
-       TEST_ASSERT(vcpu_memory_bytes % perf_test_args.host_page_size == 0,
-                   "Guest memory size is not host page size aligned.");
-
-       guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
-                             perf_test_args.guest_page_size;
-       guest_test_phys_mem &= ~(perf_test_args.host_page_size - 1);
-
-#ifdef __s390x__
-       /* Align to 1M (segment size) */
-       guest_test_phys_mem &= ~((1 << 20) - 1);
-#endif
-
-       pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
-
-       /* Add an extra memory slot for testing */
-       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
-                                   guest_test_phys_mem,
-                                   TEST_MEM_SLOT_INDEX,
-                                   guest_num_pages, 0);
-
-       /* Do mapping for the demand paging memory slot */
-       virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
-
-       ucall_init(vm, NULL);
-
-       return vm;
-}
-
-static void add_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes)
-{
-       vm_paddr_t vcpu_gpa;
-       struct vcpu_args *vcpu_args;
-       int vcpu_id;
-
-       for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
-               vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
-
-               vm_vcpu_add_default(vm, vcpu_id, guest_code);
-
-               vcpu_args->vcpu_id = vcpu_id;
-               vcpu_args->gva = guest_test_virt_mem +
-                                (vcpu_id * vcpu_memory_bytes);
-               vcpu_args->pages = vcpu_memory_bytes /
-                                  perf_test_args.guest_page_size;
+extern uint64_t guest_test_phys_mem;
 
-               vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes);
-               pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
-                        vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes);
-       }
-}
+struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
+                               uint64_t vcpu_memory_bytes);
+void perf_test_destroy_vm(struct kvm_vm *vm);
+void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes);
 
 #endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */
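
With VM setup factored out into the library, a perf test's run_test()
reduces to roughly the following sketch (vcpu_worker and the measurement
code remain the test's responsibility; nr_vcpus is fixed here only for
brevity):

	static void run_test(enum vm_guest_mode mode, void *arg)
	{
		pthread_t threads[KVM_MAX_VCPUS];
		struct kvm_vm *vm;
		int i, nr_vcpus = 4;

		vm = perf_test_create_vm(mode, nr_vcpus, DEFAULT_PER_VCPU_MEM_SIZE);
		perf_test_args.wr_fract = 1;
		perf_test_setup_vcpus(vm, nr_vcpus, DEFAULT_PER_VCPU_MEM_SIZE);
		sync_global_to_guest(vm, perf_test_args);

		for (i = 0; i < nr_vcpus; i++)
			pthread_create(&threads[i], NULL, vcpu_worker,
				       &perf_test_args.vcpu_args[i]);

		/* ... run the measured phase, then join the workers ... */
		for (i = 0; i < nr_vcpus; i++)
			pthread_join(threads[i], NULL);

		perf_test_destroy_vm(vm);
	}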
diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c
new file mode 100644 (file)
index 0000000..25bff30
--- /dev/null
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Red Hat, Inc.
+ */
+#include "guest_modes.h"
+
+struct guest_mode guest_modes[NUM_VM_MODES];
+
+void guest_modes_append_default(void)
+{
+       guest_mode_append(VM_MODE_DEFAULT, true, true);
+
+#ifdef __aarch64__
+       guest_mode_append(VM_MODE_P40V48_64K, true, true);
+       {
+               unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
+               if (limit >= 52)
+                       guest_mode_append(VM_MODE_P52V48_64K, true, true);
+               if (limit >= 48) {
+                       guest_mode_append(VM_MODE_P48V48_4K, true, true);
+                       guest_mode_append(VM_MODE_P48V48_64K, true, true);
+               }
+       }
+#endif
+}
+
+void for_each_guest_mode(void (*func)(enum vm_guest_mode, void *), void *arg)
+{
+       int i;
+
+       for (i = 0; i < NUM_VM_MODES; ++i) {
+               if (!guest_modes[i].enabled)
+                       continue;
+               TEST_ASSERT(guest_modes[i].supported,
+                           "Guest mode ID %d (%s) not supported.",
+                           i, vm_guest_mode_string(i));
+               func(i, arg);
+       }
+}
+
+void guest_modes_help(void)
+{
+       int i;
+
+       printf(" -m: specify the guest mode ID to test\n"
+              "     (default: test all supported modes)\n"
+              "     This option may be used multiple times.\n"
+              "     Guest mode IDs:\n");
+       for (i = 0; i < NUM_VM_MODES; ++i) {
+               printf("         %d:    %s%s\n", i, vm_guest_mode_string(i),
+                      guest_modes[i].supported ? " (supported)" : "");
+       }
+}
+
+void guest_modes_cmdline(const char *arg)
+{
+       static bool mode_selected;
+       unsigned int mode;
+       int i;
+
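+       /*
+        * The first -m switches from "run all supported modes" to an
+        * explicit allow-list; each further -m adds one more mode.
+        */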
+       if (!mode_selected) {
+               for (i = 0; i < NUM_VM_MODES; ++i)
+                       guest_modes[i].enabled = false;
+               mode_selected = true;
+       }
+
+       mode = strtoul(arg, NULL, 10);
+       TEST_ASSERT(mode < NUM_VM_MODES, "Guest mode ID %u too big", mode);
+       guest_modes[mode].enabled = true;
+}
index 88ef7067f1e668b8f3f147a3b67360e2a2b51fce..fa5a90e6c6f075e276991b5415bca95239f88c4d 100644 (file)
@@ -153,14 +153,7 @@ const char * const vm_guest_mode_string[] = {
 _Static_assert(sizeof(vm_guest_mode_string)/sizeof(char *) == NUM_VM_MODES,
               "Missing new mode strings?");
 
-struct vm_guest_mode_params {
-       unsigned int pa_bits;
-       unsigned int va_bits;
-       unsigned int page_size;
-       unsigned int page_shift;
-};
-
-static const struct vm_guest_mode_params vm_guest_mode_params[] = {
+const struct vm_guest_mode_params vm_guest_mode_params[] = {
        { 52, 48,  0x1000, 12 },
        { 52, 48, 0x10000, 16 },
        { 48, 48,  0x1000, 12 },
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
new file mode 100644 (file)
index 0000000..9be1944
--- /dev/null
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Google LLC.
+ */
+
+#include "kvm_util.h"
+#include "perf_test_util.h"
+#include "processor.h"
+
+struct perf_test_args perf_test_args;
+
+uint64_t guest_test_phys_mem;
+
+/*
+ * Guest virtual memory offset of the testing memory slot.
+ * Must not conflict with identity mapped test code.
+ */
+static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
+
+/*
+ * Continuously access the first 8 bytes of each page in the specified
+ * region; wr_fract decides which pages are written and which are read.
+ */
+static void guest_code(uint32_t vcpu_id)
+{
+       struct perf_test_vcpu_args *vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
+       uint64_t gva;
+       uint64_t pages;
+       uint64_t i;
+
+       /* Make sure vCPU args data structure is not corrupt. */
+       GUEST_ASSERT(vcpu_args->vcpu_id == vcpu_id);
+
+       gva = vcpu_args->gva;
+       pages = vcpu_args->pages;
+
+       while (true) {
+               for (i = 0; i < pages; i++) {
+                       uint64_t addr = gva + (i * perf_test_args.guest_page_size);
+
+                       if (i % perf_test_args.wr_fract == 0)
+                               *(uint64_t *)addr = 0x0123456789ABCDEF;
+                       else
+                               READ_ONCE(*(uint64_t *)addr);
+               }
+
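+               /* Signal the host that one full pass over the region is done. */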
+               GUEST_SYNC(1);
+       }
+}
+
+struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
+                                  uint64_t vcpu_memory_bytes)
+{
+       struct kvm_vm *vm;
+       uint64_t guest_num_pages;
+
+       pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
+
+       perf_test_args.host_page_size = getpagesize();
+       perf_test_args.guest_page_size = vm_guest_mode_params[mode].page_size;
+
+       guest_num_pages = vm_adjust_num_guest_pages(mode,
+                               (vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size);
+
+       TEST_ASSERT(vcpu_memory_bytes % perf_test_args.host_page_size == 0,
+                   "Guest memory size is not host page size aligned.");
+       TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0,
+                   "Guest memory size is not guest page size aligned.");
+
+       vm = vm_create_with_vcpus(mode, vcpus,
+                                 (vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size,
+                                 0, guest_code, NULL);
+
+       perf_test_args.vm = vm;
+
+       /*
+        * If there should be more memory in the guest test region than there
+        * can be pages in the guest, it will definitely cause problems.
+        */
+       TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
+                   "Requested more guest memory than address space allows.\n"
+                   "    guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n",
+                   guest_num_pages, vm_get_max_gfn(vm), vcpus,
+                   vcpu_memory_bytes);
+
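+       /*
+        * Carve the test region out of the top of guest physical memory,
+        * aligned down to a host page boundary.
+        */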
+       guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
+                             perf_test_args.guest_page_size;
+       guest_test_phys_mem &= ~(perf_test_args.host_page_size - 1);
+#ifdef __s390x__
+       /* Align to 1M (segment size) */
+       guest_test_phys_mem &= ~((1 << 20) - 1);
+#endif
+       pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
+
+       /* Add an extra memory slot for testing */
+       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+                                   guest_test_phys_mem,
+                                   PERF_TEST_MEM_SLOT_INDEX,
+                                   guest_num_pages, 0);
+
+       /* Do mapping for the demand paging memory slot */
+       virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
+
+       ucall_init(vm, NULL);
+
+       return vm;
+}
+
+void perf_test_destroy_vm(struct kvm_vm *vm)
+{
+       ucall_uninit(vm);
+       kvm_vm_free(vm);
+}
+
+void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes)
+{
+       vm_paddr_t vcpu_gpa;
+       struct perf_test_vcpu_args *vcpu_args;
+       int vcpu_id;
+
+       for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
+               vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
+
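+               /*
+                * Give each vCPU an equal, contiguous slice of the test
+                * region; slices are laid out back to back in GVA and
+                * GPA space.
+                */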
+               vcpu_args->vcpu_id = vcpu_id;
+               vcpu_args->gva = guest_test_virt_mem +
+                                (vcpu_id * vcpu_memory_bytes);
+               vcpu_args->pages = vcpu_memory_bytes /
+                                  perf_test_args.guest_page_size;
+
+               vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes);
+               pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
+                        vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes);
+       }
+}
index eb693a3b7b4a19a6b8f95d56135d910de23794f1..4c7d33618437c658bf014afff57da52b1a7d36f9 100755 (executable)
@@ -869,7 +869,7 @@ ipv6_torture()
        pid3=$!
        ip netns exec me ping -f 2001:db8:101::2 >/dev/null 2>&1 &
        pid4=$!
-       ip netns exec me mausezahn veth1 -B 2001:db8:101::2 -A 2001:db8:91::1 -c 0 -t tcp "dp=1-1023, flags=syn" >/dev/null 2>&1 &
+       ip netns exec me mausezahn -6 veth1 -B 2001:db8:101::2 -A 2001:db8:91::1 -c 0 -t tcp "dp=1-1023, flags=syn" >/dev/null 2>&1 &
        pid5=$!
 
        sleep 300
index 84205c3a55ebed11ddbbf307d771338e0f4d1ac5..2b5707738609ef313e533c78492e25bbc6a02313 100755 (executable)
@@ -1055,7 +1055,6 @@ ipv6_addr_metric_test()
 
        check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 260"
        log_test $? 0 "Set metric with peer route on local side"
-       log_test $? 0 "User specified metric on local address"
        check_route6 "2001:db8:104::2 dev dummy2 proto kernel metric 260"
        log_test $? 0 "Set metric with peer route on peer side"
 
index 464e31eabc7337076cb3a31c30e699be9f00e527..64cd2e23c5687a80a9b9afb32d5207102d38f122 100755 (executable)
 # - list_flush_ipv6_exception
 #      Using the same topology as in pmtu_ipv6, create exceptions, and check
 #      they are shown when listing exception caches, gone after flushing them
-
+#
+# - pmtu_ipv4_route_change
+#      Use the same topology as in pmtu_ipv4, but issue a route replacement
+#      command and delete the corresponding device afterward. This tests for
+#      proper cleanup of the PMTU exceptions by the route replacement path.
+#      Device unregistration should complete successfully.
+#
+# - pmtu_ipv6_route_change
+#      Same as above but with IPv6
 
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
@@ -224,7 +232,9 @@ tests="
        cleanup_ipv4_exception          ipv4: cleanup of cached exceptions      1
        cleanup_ipv6_exception          ipv6: cleanup of cached exceptions      1
        list_flush_ipv4_exception       ipv4: list and flush cached exceptions  1
-       list_flush_ipv6_exception       ipv6: list and flush cached exceptions  1"
+       list_flush_ipv6_exception       ipv6: list and flush cached exceptions  1
+       pmtu_ipv4_route_change          ipv4: PMTU exception w/route replace    1
+       pmtu_ipv6_route_change          ipv6: PMTU exception w/route replace    1"
 
 NS_A="ns-A"
 NS_B="ns-B"
@@ -1782,6 +1792,63 @@ test_list_flush_ipv6_exception() {
        return ${fail}
 }
 
+test_pmtu_ipvX_route_change() {
+       family=${1}
+
+       setup namespaces routing || return 2
+       trace "${ns_a}"  veth_A-R1    "${ns_r1}" veth_R1-A \
+             "${ns_r1}" veth_R1-B    "${ns_b}"  veth_B-R1 \
+             "${ns_a}"  veth_A-R2    "${ns_r2}" veth_R2-A \
+             "${ns_r2}" veth_R2-B    "${ns_b}"  veth_B-R2
+
+       if [ ${family} -eq 4 ]; then
+               ping=ping
+               dst1="${prefix4}.${b_r1}.1"
+               dst2="${prefix4}.${b_r2}.1"
+               gw="${prefix4}.${a_r1}.2"
+       else
+               ping=${ping6}
+               dst1="${prefix6}:${b_r1}::1"
+               dst2="${prefix6}:${b_r2}::1"
+               gw="${prefix6}:${a_r1}::2"
+       fi
+
+       # Set up initial MTU values
+       mtu "${ns_a}"  veth_A-R1 2000
+       mtu "${ns_r1}" veth_R1-A 2000
+       mtu "${ns_r1}" veth_R1-B 1400
+       mtu "${ns_b}"  veth_B-R1 1400
+
+       mtu "${ns_a}"  veth_A-R2 2000
+       mtu "${ns_r2}" veth_R2-A 2000
+       mtu "${ns_r2}" veth_R2-B 1500
+       mtu "${ns_b}"  veth_B-R2 1500
+
+       # Create route exceptions
+       run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst1}
+       run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst2}
+
+       # Check that exceptions have been created with the correct PMTU
+       pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})"
+       check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1
+       pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
+       check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1
+
+       # Replace the route from A to R1
+       run_cmd ${ns_a} ip route change default via ${gw}
+
+       # Delete the device in A
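+       # If the route replacement leaked a PMTU exception, the stale dst
+       # would keep a reference on veth_A-R1 and stall unregistration.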
+       run_cmd ${ns_a} ip link del "veth_A-R1"
+}
+
+test_pmtu_ipv4_route_change() {
+       test_pmtu_ipvX_route_change 4
+}
+
+test_pmtu_ipv6_route_change() {
+       test_pmtu_ipvX_route_change 6
+}
+
 usage() {
        echo
        echo "$0 [OPTIONS] [TEST]..."
index cb0d1890a860fb91128f51c1029fce0fec502823..e0088c2d38a5d7ecf952bc8deb931c29b615d52b 100644 (file)
@@ -103,8 +103,8 @@ FIXTURE(tls)
 
 FIXTURE_VARIANT(tls)
 {
-       u16 tls_version;
-       u16 cipher_type;
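+       /* use stdint types; the kernel-style u16 is not defined in the selftest headers */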
+       uint16_t tls_version;
+       uint16_t cipher_type;
 };
 
 FIXTURE_VARIANT_ADD(tls, 12_gcm)
index ac2a30be9b325a4b68a1c1ce74d1fdfabb5b64be..f8a19f548ae9d5274a284e325ad2ded3be81a2cd 100755 (executable)
@@ -5,6 +5,14 @@
 
 readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
 
+# Set the global exit status, but never reset a nonzero one.
+check_err()
+{
+       if [ $ret -eq 0 ]; then
+               ret=$1
+       fi
+}
+
 cleanup() {
        local -r jobs="$(jobs -p)"
        local -r ns="$(ip netns list|grep $PEER_NS)"
@@ -44,7 +52,9 @@ run_one() {
        # Hack: let bg programs complete the startup
        sleep 0.1
        ./udpgso_bench_tx ${tx_args}
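+       # capture the sender's exit status before wait clobbers $?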
+       ret=$?
        wait $(jobs -p)
+       return $ret
 }
 
 run_test() {
@@ -87,8 +97,10 @@ run_one_nat() {
 
        sleep 0.1
        ./udpgso_bench_tx ${tx_args}
+       ret=$?
        kill -INT $pid
        wait $(jobs -p)
+       return $ret
 }
 
 run_one_2sock() {
@@ -110,7 +122,9 @@ run_one_2sock() {
        sleep 0.1
        # first UDP GSO socket should be closed at this point
        ./udpgso_bench_tx ${tx_args}
+       ret=$?
        wait $(jobs -p)
+       return $ret
 }
 
 run_nat_test() {
@@ -131,36 +145,54 @@ run_all() {
        local -r core_args="-l 4"
        local -r ipv4_args="${core_args} -4 -D 192.168.1.1"
        local -r ipv6_args="${core_args} -6 -D 2001:db8::1"
+       ret=0
 
        echo "ipv4"
        run_test "no GRO" "${ipv4_args} -M 10 -s 1400" "-4 -n 10 -l 1400"
+       check_err $?
 
        # explicitly check we are not receiving UDP_SEGMENT cmsg (-S -1)
        # when GRO does not take place
        run_test "no GRO chk cmsg" "${ipv4_args} -M 10 -s 1400" "-4 -n 10 -l 1400 -S -1"
+       check_err $?
 
        # the GSO packets are aggregated because:
        # * veth schedule napi after each xmit
        # * segmentation happens in BH context, veth napi poll is delayed after
        #   the transmission of the last segment
        run_test "GRO" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720"
+       check_err $?
        run_test "GRO chk cmsg" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720 -S 1472"
+       check_err $?
        run_test "GRO with custom segment size" "${ipv4_args} -M 1 -s 14720 -S 500 " "-4 -n 1 -l 14720"
+       check_err $?
        run_test "GRO with custom segment size cmsg" "${ipv4_args} -M 1 -s 14720 -S 500 " "-4 -n 1 -l 14720 -S 500"
+       check_err $?
 
        run_nat_test "bad GRO lookup" "${ipv4_args} -M 1 -s 14720 -S 0" "-n 10 -l 1472"
+       check_err $?
        run_2sock_test "multiple GRO socks" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720 -S 1472"
+       check_err $?
 
        echo "ipv6"
        run_test "no GRO" "${ipv6_args} -M 10 -s 1400" "-n 10 -l 1400"
+       check_err $?
        run_test "no GRO chk cmsg" "${ipv6_args} -M 10 -s 1400" "-n 10 -l 1400 -S -1"
+       check_err $?
        run_test "GRO" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 1 -l 14520"
+       check_err $?
        run_test "GRO chk cmsg" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 1 -l 14520 -S 1452"
+       check_err $?
        run_test "GRO with custom segment size" "${ipv6_args} -M 1 -s 14520 -S 500" "-n 1 -l 14520"
+       check_err $?
        run_test "GRO with custom segment size cmsg" "${ipv6_args} -M 1 -s 14520 -S 500" "-n 1 -l 14520 -S 500"
+       check_err $?
 
        run_nat_test "bad GRO lookup" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 10 -l 1452"
+       check_err $?
        run_2sock_test "multiple GRO socks" "${ipv6_args} -M 1 -s 14520 -S 0 " "-n 1 -l 14520 -S 1452"
+       check_err $?
+       return $ret
 }
 
 if [ ! -f ../bpf/xdp_dummy.o ]; then
@@ -180,3 +212,5 @@ elif [[ $1 == "__subprocess_2sock" ]]; then
        shift
        run_one_2sock $@
 fi
+
+exit $?
index a374e10ef5065ffc70dd397f161c6c8af42ac54e..3006a8e5b41a1031af0231a7a17fa49f5f98edd3 100644 (file)
@@ -4,7 +4,8 @@
 TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \
        conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
        nft_concat_range.sh nft_conntrack_helper.sh \
-       nft_queue.sh nft_meta.sh
+       nft_queue.sh nft_meta.sh \
+       ipip-conntrack-mtu.sh
 
 LDLIBS = -lmnl
 TEST_GEN_FILES =  nf-queue
diff --git a/tools/testing/selftests/netfilter/ipip-conntrack-mtu.sh b/tools/testing/selftests/netfilter/ipip-conntrack-mtu.sh
new file mode 100755 (executable)
index 0000000..4a6f5c3
--- /dev/null
@@ -0,0 +1,206 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+# Conntrack needs to reassemble fragments in order to have complete
+# packets for rule matching.  Reassembly can lead to packet loss.
+
+# Consider the following setup:
+#            +--------+       +---------+       +--------+
+#            |Router A|-------|Wanrouter|-------|Router B|
+#            |        |.IPIP..|         |..IPIP.|        |
+#            +--------+       +---------+       +--------+
+#           /                  mtu 1400                   \
+#          /                                               \
+#+--------+                                                 +--------+
+#|Client A|                                                 |Client B|
+#|        |                                                 |        |
+#+--------+                                                 +--------+
+
+# Router A and Router B use IPIP tunnel interfaces to tunnel traffic
+# between Client A and Client B over WAN. Wanrouter has MTU 1400 set
+# on its interfaces.
+
+rnd=$(mktemp -u XXXXXXXX)
+rx=$(mktemp)
+
+r_a="ns-ra-$rnd"
+r_b="ns-rb-$rnd"
+r_w="ns-rw-$rnd"
+c_a="ns-ca-$rnd"
+c_b="ns-cb-$rnd"
+
+checktool() {
+       if ! $1 > /dev/null 2>&1; then
+               echo "SKIP: Could not $2"
+               exit $ksft_skip
+       fi
+}
+
+checktool "iptables --version" "run test without iptables"
+checktool "ip -Version" "run test without ip tool"
+checktool "which nc" "run test without nc (netcat)"
+checktool "ip netns add ${r_a}" "create net namespace"
+
+for n in ${r_b} ${r_w} ${c_a} ${c_b};do
+       ip netns add ${n}
+done
+
+cleanup() {
+       for n in ${r_a} ${r_b} ${r_w} ${c_a} ${c_b};do
+               ip netns del ${n}
+       done
+       rm -f ${rx}
+}
+
+trap cleanup EXIT
+
+test_path() {
+       msg="$1"
+
+       ip netns exec ${c_b} nc -n -w 3 -q 3 -u -l -p 5000 > ${rx} < /dev/null &
+
+       sleep 1
+       for i in 1 2 3; do
+               head -c1400 /dev/zero | tr "\000" "a" | ip netns exec ${c_a} nc -n -w 1 -u 192.168.20.2 5000
+       done
+
+       wait
+
+       bytes=$(wc -c < ${rx})
+
+       if [ $bytes -eq 1400 ];then
+               echo "OK: PMTU $msg connection tracking"
+       else
+               echo "FAIL: PMTU $msg connection tracking: got $bytes, expected 1400"
+               exit 1
+       fi
+}
+
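+# test_path is run twice below: first on the plain forwarding path
+# ("without" connection tracking), then again after a conntrack rule
+# has been added on Router A ("with").
+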
+# Detailed setup for Router A
+# ---------------------------
+# Interfaces:
+# eth0: 10.2.2.1/24
+# eth1: 192.168.10.1/24
+# ipip0: No IP address, local 10.2.2.1 remote 10.4.4.1
+# Routes:
+# 192.168.20.0/24 dev ipip0    (192.168.20.0/24 is subnet of Client B)
+# 10.4.4.1 via 10.2.2.254      (Router B via Wanrouter)
+# No iptables rules at all.
+
+ip link add veth0 netns ${r_a} type veth peer name veth0 netns ${r_w}
+ip link add veth1 netns ${r_a} type veth peer name veth0 netns ${c_a}
+
+l_addr="10.2.2.1"
+r_addr="10.4.4.1"
+ip netns exec ${r_a} ip link add ipip0 type ipip local ${l_addr} remote ${r_addr} mode ipip || exit $ksft_skip
+
+for dev in lo veth0 veth1 ipip0; do
+    ip -net ${r_a} link set $dev up
+done
+
+ip -net ${r_a} addr add 10.2.2.1/24 dev veth0
+ip -net ${r_a} addr add 192.168.10.1/24 dev veth1
+
+ip -net ${r_a} route add 192.168.20.0/24 dev ipip0
+ip -net ${r_a} route add 10.4.4.0/24 via 10.2.2.254
+
+ip netns exec ${r_a} sysctl -q net.ipv4.conf.all.forwarding=1 > /dev/null
+
+# Detailed setup for Router B
+# ---------------------------
+# Interfaces:
+# eth0: 10.4.4.1/24
+# eth1: 192.168.20.1/24
+# ipip0: No IP address, local 10.4.4.1 remote 10.2.2.1
+# Routes:
+# 192.168.10.0/24 dev ipip0    (192.168.10.0/24 is subnet of Client A)
+# 10.2.2.1 via 10.4.4.254      (Router A via Wanrouter)
+# No iptables rules at all.
+
+ip link add veth0 netns ${r_b} type veth peer name veth1 netns ${r_w}
+ip link add veth1 netns ${r_b} type veth peer name veth0 netns ${c_b}
+
+l_addr="10.4.4.1"
+r_addr="10.2.2.1"
+
+ip netns exec ${r_b} ip link add ipip0 type ipip local ${l_addr} remote ${r_addr} mode ipip || exit $ksft_skip
+
+for dev in lo veth0 veth1 ipip0; do
+       ip -net ${r_b} link set $dev up
+done
+
+ip -net ${r_b} addr add 10.4.4.1/24 dev veth0
+ip -net ${r_b} addr add 192.168.20.1/24 dev veth1
+
+ip -net ${r_b} route add 192.168.10.0/24 dev ipip0
+ip -net ${r_b} route add 10.2.2.0/24 via 10.4.4.254
+ip netns exec ${r_b} sysctl -q net.ipv4.conf.all.forwarding=1 > /dev/null
+
+# Client A
+ip -net ${c_a} addr add 192.168.10.2/24 dev veth0
+ip -net ${c_a} link set dev lo up
+ip -net ${c_a} link set dev veth0 up
+ip -net ${c_a} route add default via 192.168.10.1
+
+# Client B
+ip -net ${c_b} addr add 192.168.20.2/24 dev veth0
+ip -net ${c_b} link set dev veth0 up
+ip -net ${c_b} link set dev lo up
+ip -net ${c_b} route add default via 192.168.20.1
+
+# Wan
+ip -net ${r_w} addr add 10.2.2.254/24 dev veth0
+ip -net ${r_w} addr add 10.4.4.254/24 dev veth1
+
+ip -net ${r_w} link set dev lo up
+ip -net ${r_w} link set dev veth0 up mtu 1400
+ip -net ${r_w} link set dev veth1 up mtu 1400
+
+ip -net ${r_a} link set dev veth0 mtu 1400
+ip -net ${r_b} link set dev veth0 mtu 1400
+
+ip netns exec ${r_w} sysctl -q net.ipv4.conf.all.forwarding=1 > /dev/null
+
+# Path MTU discovery
+# ------------------
+# Running tracepath from Client A to Client B shows PMTU discovery is working
+# as expected:
+#
+# clienta:~# tracepath 192.168.20.2
+# 1?: [LOCALHOST]                      pmtu 1500
+# 1:  192.168.10.1                                          0.867ms
+# 1:  192.168.10.1                                          0.302ms
+# 2:  192.168.10.1                                          0.312ms pmtu 1480
+# 2:  no reply
+# 3:  192.168.10.1                                          0.510ms pmtu 1380
+# 3:  192.168.20.2                                          2.320ms reached
+# Resume: pmtu 1380 hops 3 back 3
+
+# ip netns exec ${c_a} traceroute --mtu 192.168.20.2
+
+# Router A has learned PMTU (1400) to Router B from Wanrouter.
+# Client A has learned PMTU (1400 - IPIP overhead = 1380) to Client B
+# from Router A.
+
+# Send large UDP packet
+# ---------------------
+# Now we send a 1400-byte UDP packet from Client A to Client B:
+
+# clienta:~# head -c1400 /dev/zero | tr "\000" "a" | nc -u 192.168.20.2 5000
+test_path "without"
+
+# The IPv4 stack on Client A already knows the PMTU to Client B, so the
+# UDP packet is sent as two fragments (1380 + 20). Router A forwards the
+# fragments between eth1 and ipip0. The fragments fit into the tunnel and
+# reach their destination.
+
+# When sending the large UDP packet again, Router A now reassembles the
+# fragments before routing the packet over ipip0. The resulting IPIP
+# packet is too big (1400) for the tunnel PMTU (1380) to Router B, so it
+# is dropped on Router A before sending.
+
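+# The conntrack match below pulls the defrag hooks into this namespace,
+# which is what makes Router A reassemble the fragments before forwarding.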
+ip netns exec ${r_a} iptables -A FORWARD -m conntrack --ctstate NEW
+test_path "with"
index edf0a48da6bf80650720471cc3913521330073d7..bf6b9626c7dd2749547cb13062188d4bb834c5b6 100755 (executable)
@@ -94,7 +94,13 @@ check_for_helper()
        local message=$2
        local port=$3
 
-       ip netns exec ${netns} conntrack -L -p tcp --dport $port 2> /dev/null |grep -q 'helper=ftp'
+       if echo $message |grep -q 'ipv6';then
+               local family="ipv6"
+       else
+               local family="ipv4"
+       fi
+
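+       # conntrack -L defaults to the ipv4 table; pass the family
+       # explicitly so the ipv6 entries are listed too.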
+       ip netns exec ${netns} conntrack -L -f $family -p tcp --dport $port 2> /dev/null |grep -q 'helper=ftp'
        if [ $? -ne 0 ] ; then
                echo "FAIL: ${netns} did not show attached helper $message" 1>&2
                ret=1
@@ -111,8 +117,8 @@ test_helper()
 
        sleep 3 | ip netns exec ${ns2} nc -w 2 -l -p $port > /dev/null &
 
-       sleep 1
        sleep 1 | ip netns exec ${ns1} nc -w 2 10.0.1.2 $port > /dev/null &
+       sleep 1
 
        check_for_helper "$ns1" "ip $msg" $port
        check_for_helper "$ns2" "ip $msg" $port
@@ -128,8 +134,8 @@ test_helper()
 
        sleep 3 | ip netns exec ${ns2} nc -w 2 -6 -l -p $port > /dev/null &
 
-       sleep 1
        sleep 1 | ip netns exec ${ns1} nc -w 2 -6 dead:1::2 $port > /dev/null &
+       sleep 1
 
        check_for_helper "$ns1" "ipv6 $msg" $port
        check_for_helper "$ns2" "ipv6 $msg" $port
index 5eb64d41e54199b47c0b05a1db9f5a37979c17ca..a8dc51af5a9c0c5e5e897163191ebd6e06d9b865 100644 (file)
@@ -1,5 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
 vdso_test
+vdso_test_abi
+vdso_test_clock_getres
+vdso_test_correctness
 vdso_test_gettimeofday
 vdso_test_getcpu
 vdso_standalone_test_x86
index 5029ef9b228c3867efa13d2f81341ba3412ac3a2..c4aea794725a7e502b1334efdfdc512446b40c52 100644 (file)
@@ -349,7 +349,7 @@ static void test_one_clock_gettime64(int clock, const char *name)
                return;
        }
 
-       printf("\t%llu.%09ld %llu.%09ld %llu.%09ld\n",
+       printf("\t%llu.%09lld %llu.%09lld %llu.%09lld\n",
               (unsigned long long)start.tv_sec, start.tv_nsec,
               (unsigned long long)vdso.tv_sec, vdso.tv_nsec,
               (unsigned long long)end.tv_sec, end.tv_nsec);
index 9a25307f6115484c5d1d2425751cac8f1673390a..d42115e4284d75fea30850c19b3c172ff52f4a49 100644 (file)
@@ -4,7 +4,7 @@
 include local_config.mk
 
 uname_M := $(shell uname -m 2>/dev/null || echo not)
-MACHINE ?= $(shell echo $(uname_M) | sed -e 's/aarch64.*/arm64/')
+MACHINE ?= $(shell echo $(uname_M) | sed -e 's/aarch64.*/arm64/' -e 's/ppc64.*/ppc64/')
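+# MACHINE is the normalized "uname -m" value; ARCH may hold a kbuild-style
+# name (set by the caller) that never matches the machine checks below.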
 
 # Without this, failed build products remain, with up-to-date timestamps,
 # thus tricking Make (and you!) into believing that All Is Well, in subsequent
@@ -43,7 +43,7 @@ TEST_GEN_FILES += thuge-gen
 TEST_GEN_FILES += transhuge-stress
 TEST_GEN_FILES += userfaultfd
 
-ifeq ($(ARCH),x86_64)
+ifeq ($(MACHINE),x86_64)
 CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_32bit_program.c -m32)
 CAN_BUILD_X86_64 := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_64bit_program.c)
 CAN_BUILD_WITH_NOPIE := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_program.c -no-pie)
@@ -65,13 +65,13 @@ TEST_GEN_FILES += $(BINARIES_64)
 endif
 else
 
-ifneq (,$(findstring $(ARCH),powerpc))
+ifneq (,$(findstring $(MACHINE),ppc64))
 TEST_GEN_FILES += protection_keys
 endif
 
 endif
 
-ifneq (,$(filter $(MACHINE),arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sh64 sparc64 x86_64))
+ifneq (,$(filter $(MACHINE),arm64 ia64 mips64 parisc64 ppc64 riscv64 s390x sh64 sparc64 x86_64))
 TEST_GEN_FILES += va_128TBswitch
 TEST_GEN_FILES += virtual_address_range
 TEST_GEN_FILES += write_to_hugetlbfs
@@ -84,7 +84,7 @@ TEST_FILES := test_vmalloc.sh
 KSFT_KHDR_INSTALL := 1
 include ../lib.mk
 
-ifeq ($(ARCH),x86_64)
+ifeq ($(MACHINE),x86_64)
 BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32))
 BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
 
index b50c2085c1ac0f1e50abc219a2bdab60b2b790bf..fe07d97df9fa89044d6493452226b0d31c3534c1 100644 (file)
@@ -1,5 +1,4 @@
 CONFIG_LOCALVERSION="-debug"
-CONFIG_ENABLE_MUST_CHECK=y
 CONFIG_FRAME_POINTER=y
 CONFIG_STACK_VALIDATION=y
 CONFIG_DEBUG_KERNEL=y
index 5f260488e999b9331351a325c3b9caa755e0b744..fa9e3614d30edb150f6b8f7a071f0e351657dd08 100644 (file)
@@ -485,9 +485,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
        kvm->mmu_notifier_count++;
        need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
                                             range->flags);
-       need_tlb_flush |= kvm->tlbs_dirty;
        /* we've to flush the tlb before the pages can be freed */
-       if (need_tlb_flush)
+       if (need_tlb_flush || kvm->tlbs_dirty)
                kvm_flush_remote_tlbs(kvm);
 
        spin_unlock(&kvm->mmu_lock);