Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
author    Jakub Kicinski <kuba@kernel.org>
          Wed, 27 Apr 2022 22:18:39 +0000 (15:18 -0700)
committer Jakub Kicinski <kuba@kernel.org>
          Wed, 27 Apr 2022 22:18:40 +0000 (15:18 -0700)
Daniel Borkmann says:

====================
pull-request: bpf 2022-04-27

We've added 5 non-merge commits during the last 20 day(s) which contain
a total of 6 files changed, 34 insertions(+), 12 deletions(-).

The main changes are:

1) Fix xsk sockets when rx and tx are separately bound to the same umem;
   also fix xsk copy mode combined with busy poll, from Maciej Fijalkowski.
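
   A minimal sketch of the trigger scenario, using the raw AF_XDP uapi
   (interface name, queue id and ring sizes are placeholders; error
   handling omitted):

     #include <linux/if_xdp.h>
     #include <net/if.h>
     #include <sys/mman.h>
     #include <sys/socket.h>

     #define NUM_FRAMES 4096
     #define FRAME_SIZE 2048

     int main(void)
     {
         int rx_fd = socket(AF_XDP, SOCK_RAW, 0);
         int tx_fd = socket(AF_XDP, SOCK_RAW, 0);
         int ring_sz = 2048;
         void *bufs = mmap(NULL, NUM_FRAMES * FRAME_SIZE,
                           PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
         struct xdp_umem_reg umem = {
             .addr       = (unsigned long)bufs,
             .len        = NUM_FRAMES * FRAME_SIZE,
             .chunk_size = FRAME_SIZE,
         };
         struct sockaddr_xdp sxdp = {
             .sxdp_family   = AF_XDP,
             .sxdp_ifindex  = if_nametoindex("eth0"), /* placeholder */
             .sxdp_queue_id = 0,
         };

         /* Register the umem and the rx side on the first socket. */
         setsockopt(rx_fd, SOL_XDP, XDP_UMEM_REG, &umem, sizeof(umem));
         setsockopt(rx_fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_sz, sizeof(ring_sz));
         setsockopt(rx_fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ring_sz, sizeof(ring_sz));
         setsockopt(rx_fd, SOL_XDP, XDP_RX_RING, &ring_sz, sizeof(ring_sz));
         bind(rx_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));

         /* Second socket carries only tx and binds to the same umem. */
         setsockopt(tx_fd, SOL_XDP, XDP_TX_RING, &ring_sz, sizeof(ring_sz));
         sxdp.sxdp_flags = XDP_SHARED_UMEM;
         sxdp.sxdp_shared_umem_fd = rx_fd;
         bind(tx_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
         return 0;
     }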

2) Fix BPF tunnel/collect_md helpers with bpf_xmit lwt hook usage, which
   triggered a crash due to an invalid metadata_dst access, from Eyal Birger.
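
   The crashing path can be reproduced with a minimal lwt_xmit program
   that sets a tunnel key (a sketch; the address, tunnel id and the
   route in the attach command are placeholders):

     /* tunnel_xmit.bpf.c */
     #include <linux/bpf.h>
     #include <bpf/bpf_helpers.h>

     SEC("lwt_xmit")
     int tunnel_xmit(struct __sk_buff *skb)
     {
         struct bpf_tunnel_key key = {};

         key.remote_ipv4 = 0xc0000201; /* 192.0.2.1, placeholder */
         key.tunnel_id   = 42;
         key.tunnel_ttl  = 64;

         /* Before the fix this dereferenced an invalid metadata_dst
          * when called from the bpf_xmit lwt hook. */
         bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
                                BPF_F_ZERO_CSUM_TX);
         return BPF_OK;
     }

     char _license[] SEC("license") = "GPL";

   Attached with iproute2 along the lines of (device and prefix are
   placeholders):

     ip route add 192.0.2.0/24 encap bpf xmit obj tunnel_xmit.o \
         section lwt_xmit dev vxlan0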

3) Fix release of page pool in XDP live packet mode, from Toke Høiland-Jørgensen.
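
   Live packet mode is driven via BPF_PROG_RUN and recycles frames
   through a page_pool; a hedged libbpf sketch of such a test run
   (prog_fd is assumed to be a loaded XDP program):

     #include <string.h>
     #include <bpf/bpf.h>

     int run_live(int prog_fd)
     {
         char pkt[64];

         memset(pkt, 0, sizeof(pkt)); /* dummy Ethernet frame */
         LIBBPF_OPTS(bpf_test_run_opts, opts,
             .data_in      = pkt,
             .data_size_in = sizeof(pkt),
             .repeat       = 64,
             .flags        = BPF_F_TEST_XDP_LIVE_FRAMES,
         );
         /* The page_pool backing these frames is what the fix now
          * releases correctly once the run completes. */
         return bpf_prog_test_run_opts(prog_fd, &opts);
     }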

4) Fix potential NULL pointer dereference in kretprobes, from Adam Zabrocki.

   (Masami & Steven preferred this small fix to be routed via the bpf tree
    given it's a follow-up fix to Masami's rethook work that went via bpf
    earlier, too.)
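
   The affected facility is the regular kretprobe API, which is backed
   by rethook when CONFIG_KRETPROBE_ON_RETHOOK=y; a minimal module
   sketch (the probed symbol is just an example):

     #include <linux/kprobes.h>
     #include <linux/module.h>

     static int ret_handler(struct kretprobe_instance *ri,
                            struct pt_regs *regs)
     {
         pr_info("kernel_clone returned %lu\n", regs_return_value(regs));
         return 0;
     }

     static struct kretprobe demo_krp = {
         .handler        = ret_handler,
         .kp.symbol_name = "kernel_clone",
         .maxactive      = 20,
     };

     static int __init demo_init(void)
     {
         return register_kretprobe(&demo_krp);
     }

     static void __exit demo_exit(void)
     {
         unregister_kretprobe(&demo_krp);
     }

     module_init(demo_init);
     module_exit(demo_exit);
     MODULE_LICENSE("GPL");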

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  xsk: Fix possible crash when multiple sockets are created
  kprobes: Fix KRETPROBES when CONFIG_KRETPROBE_ON_RETHOOK is set
  bpf, lwt: Fix crash when using bpf_skb_set_tunnel_key() from bpf_xmit lwt hook
  bpf: Fix release of page_pool in BPF_PROG_RUN in test runner
  xsk: Fix l2fwd for copy mode + busy poll combo
====================

Link: https://lore.kernel.org/r/20220427212748.9576-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
1488 files changed:
.mailmap
Documentation/ABI/testing/sysfs-class-firmware-attributes
Documentation/ABI/testing/sysfs-driver-intel_sdsi
Documentation/ABI/testing/sysfs-fs-erofs
Documentation/PCI/pci.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/core-api/xarray.rst
Documentation/dev-tools/kunit/architecture.rst
Documentation/dev-tools/kunit/start.rst
Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt
Documentation/devicetree/bindings/arm/psci.yaml
Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml
Documentation/devicetree/bindings/bus/ti-sysc.yaml
Documentation/devicetree/bindings/clock/samsung,exynos-audss-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos-ext-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos4412-isp-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos5260-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos5410-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos5433-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos7-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos7885-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos850-clock.yaml
Documentation/devicetree/bindings/clock/samsung,s2mps11.yaml
Documentation/devicetree/bindings/clock/samsung,s5pv210-audss-clock.yaml
Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.yaml
Documentation/devicetree/bindings/cpu/idle-states.yaml [moved from Documentation/devicetree/bindings/arm/idle-states.yaml with 74% similarity]
Documentation/devicetree/bindings/devfreq/event/samsung,exynos-nocp.yaml
Documentation/devicetree/bindings/devfreq/event/samsung,exynos-ppmu.yaml
Documentation/devicetree/bindings/display/bridge/chipone,icn6211.yaml
Documentation/devicetree/bindings/display/bridge/toshiba,tc358762.yaml
Documentation/devicetree/bindings/display/msm/dpu-qcm2290.yaml
Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml
Documentation/devicetree/bindings/display/panel/panel-timing.yaml
Documentation/devicetree/bindings/display/samsung/samsung,exynos-hdmi-ddc.yaml
Documentation/devicetree/bindings/display/samsung/samsung,exynos-hdmi.yaml
Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml
Documentation/devicetree/bindings/display/samsung/samsung,exynos5433-decon.yaml
Documentation/devicetree/bindings/display/samsung/samsung,exynos5433-mic.yaml
Documentation/devicetree/bindings/display/samsung/samsung,exynos7-decon.yaml
Documentation/devicetree/bindings/display/samsung/samsung,fimd.yaml
Documentation/devicetree/bindings/extcon/maxim,max77843.yaml
Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
Documentation/devicetree/bindings/hwmon/lltc,ltc4151.yaml
Documentation/devicetree/bindings/hwmon/microchip,mcp3021.yaml
Documentation/devicetree/bindings/hwmon/sensirion,sht15.yaml
Documentation/devicetree/bindings/hwmon/ti,tmp102.yaml
Documentation/devicetree/bindings/hwmon/ti,tmp108.yaml
Documentation/devicetree/bindings/hwmon/ti,tmp464.yaml
Documentation/devicetree/bindings/i2c/i2c-exynos5.yaml
Documentation/devicetree/bindings/i2c/samsung,s3c2410-i2c.yaml
Documentation/devicetree/bindings/iio/adc/adi,ad7476.yaml
Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
Documentation/devicetree/bindings/iio/dac/adi,ad5360.yaml
Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/input/mtk-pmic-keys.txt
Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml
Documentation/devicetree/bindings/interrupt-controller/mrvl,intc.yaml
Documentation/devicetree/bindings/interrupt-controller/samsung,exynos4210-combiner.yaml
Documentation/devicetree/bindings/leds/maxim,max77693.yaml
Documentation/devicetree/bindings/media/coda.yaml
Documentation/devicetree/bindings/media/mediatek,vcodec-decoder.yaml
Documentation/devicetree/bindings/media/mediatek,vcodec-encoder.yaml
Documentation/devicetree/bindings/media/mediatek,vcodec-subdev-decoder.yaml
Documentation/devicetree/bindings/memory-controllers/brcm,dpfe-cpu.yaml
Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2-timings.yaml
Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2.yaml
Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr3-timings.yaml
Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr3.yaml
Documentation/devicetree/bindings/memory-controllers/marvell,mvebu-sdram-controller.yaml
Documentation/devicetree/bindings/memory-controllers/qca,ath79-ddr-controller.yaml
Documentation/devicetree/bindings/memory-controllers/renesas,h8300-bsc.yaml
Documentation/devicetree/bindings/memory-controllers/samsung,exynos5422-dmc.yaml
Documentation/devicetree/bindings/memory-controllers/synopsys,ddrc-ecc.yaml
Documentation/devicetree/bindings/memory-controllers/ti,da8xx-ddrctl.yaml
Documentation/devicetree/bindings/mfd/maxim,max14577.yaml
Documentation/devicetree/bindings/mfd/maxim,max77686.yaml
Documentation/devicetree/bindings/mfd/maxim,max77693.yaml
Documentation/devicetree/bindings/mfd/maxim,max77802.yaml
Documentation/devicetree/bindings/mfd/maxim,max77843.yaml
Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.yaml
Documentation/devicetree/bindings/mfd/samsung,s2mpa01.yaml
Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml
Documentation/devicetree/bindings/mfd/samsung,s5m8767.yaml
Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.yaml
Documentation/devicetree/bindings/net/dsa/realtek.yaml
Documentation/devicetree/bindings/net/nfc/marvell,nci.yaml
Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml
Documentation/devicetree/bindings/net/nfc/nxp,pn532.yaml
Documentation/devicetree/bindings/net/nfc/nxp,pn544.yaml
Documentation/devicetree/bindings/net/nfc/st,st-nci.yaml
Documentation/devicetree/bindings/net/nfc/st,st21nfca.yaml
Documentation/devicetree/bindings/net/nfc/st,st95hf.yaml
Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
Documentation/devicetree/bindings/net/snps,dwmac.yaml
Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml
Documentation/devicetree/bindings/phy/nvidia,tegra20-usb-phy.yaml
Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml
Documentation/devicetree/bindings/phy/samsung,dp-video-phy.yaml
Documentation/devicetree/bindings/phy/samsung,exynos-hdmi-phy.yaml
Documentation/devicetree/bindings/phy/samsung,exynos5250-sata-phy.yaml
Documentation/devicetree/bindings/phy/samsung,mipi-video-phy.yaml
Documentation/devicetree/bindings/phy/samsung,usb2-phy.yaml
Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml
Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-gpio-bank.yaml
Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-pins-cfg.yaml
Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-wakeup-interrupt.yaml
Documentation/devicetree/bindings/pinctrl/samsung,pinctrl.yaml
Documentation/devicetree/bindings/power/renesas,apmu.yaml
Documentation/devicetree/bindings/power/supply/bq2415x.yaml
Documentation/devicetree/bindings/power/supply/maxim,max14577.yaml
Documentation/devicetree/bindings/power/supply/maxim,max77693.yaml
Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
Documentation/devicetree/bindings/regulator/maxim,max14577.yaml
Documentation/devicetree/bindings/regulator/maxim,max77686.yaml
Documentation/devicetree/bindings/regulator/maxim,max77693.yaml
Documentation/devicetree/bindings/regulator/maxim,max77802.yaml
Documentation/devicetree/bindings/regulator/maxim,max77843.yaml
Documentation/devicetree/bindings/regulator/maxim,max8952.yaml
Documentation/devicetree/bindings/regulator/maxim,max8973.yaml
Documentation/devicetree/bindings/regulator/maxim,max8997.yaml
Documentation/devicetree/bindings/regulator/samsung,s2mpa01.yaml
Documentation/devicetree/bindings/regulator/samsung,s2mps11.yaml
Documentation/devicetree/bindings/regulator/samsung,s2mps13.yaml
Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml
Documentation/devicetree/bindings/regulator/samsung,s2mps15.yaml
Documentation/devicetree/bindings/regulator/samsung,s2mpu02.yaml
Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml
Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml
Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.yaml
Documentation/devicetree/bindings/reset/socionext,uniphier-reset.yaml
Documentation/devicetree/bindings/riscv/cpus.yaml
Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.yaml
Documentation/devicetree/bindings/rng/timeriomem_rng.yaml
Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
Documentation/devicetree/bindings/rtc/atmel,at91sam9-rtc.txt [deleted file]
Documentation/devicetree/bindings/rtc/atmel,at91sam9260-rtt.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml
Documentation/devicetree/bindings/sound/samsung,arndale.yaml
Documentation/devicetree/bindings/sound/samsung,smdk5250.yaml
Documentation/devicetree/bindings/sound/samsung,snow.yaml
Documentation/devicetree/bindings/sound/samsung,tm2.yaml
Documentation/devicetree/bindings/sound/st,stm32-sai.yaml
Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
Documentation/devicetree/bindings/spi/samsung,spi-peripheral-props.yaml
Documentation/devicetree/bindings/spi/samsung,spi.yaml
Documentation/devicetree/bindings/sram/sram.yaml
Documentation/devicetree/bindings/thermal/samsung,exynos-thermal.yaml
Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
Documentation/devicetree/bindings/usb/samsung,exynos-usb2.yaml
Documentation/devicetree/bindings/vendor-prefixes.yaml
Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
Documentation/driver-api/dma-buf.rst
Documentation/filesystems/caching/backend-api.rst
Documentation/filesystems/caching/netfs-api.rst
Documentation/filesystems/cifs/ksmbd.rst
Documentation/filesystems/fsverity.rst
Documentation/filesystems/locking.rst
Documentation/filesystems/netfs_library.rst
Documentation/filesystems/vfs.rst
Documentation/kbuild/kbuild.rst
Documentation/kbuild/llvm.rst
Documentation/kbuild/makefiles.rst
Documentation/locking/locktypes.rst
Documentation/maintainer/index.rst
Documentation/maintainer/messy-diffstat.rst [new file with mode: 0644]
Documentation/networking/bonding.rst
Documentation/networking/ip-sysctl.rst
Documentation/riscv/index.rst
Documentation/sphinx/kernel_abi.py
Documentation/sphinx/kernel_feat.py
Documentation/sphinx/kernel_include.py
Documentation/sphinx/kerneldoc.py
Documentation/sphinx/kfigure.py
Documentation/sphinx/requirements.txt
Documentation/trace/user_events.rst
Documentation/virt/kvm/api.rst
Documentation/virt/kvm/index.rst
Documentation/virt/kvm/locking.rst
Documentation/virt/kvm/s390/index.rst [new file with mode: 0644]
Documentation/virt/kvm/s390/s390-diag.rst [moved from Documentation/virt/kvm/s390-diag.rst with 100% similarity]
Documentation/virt/kvm/s390/s390-pv-boot.rst [moved from Documentation/virt/kvm/s390-pv-boot.rst with 100% similarity]
Documentation/virt/kvm/s390/s390-pv.rst [moved from Documentation/virt/kvm/s390-pv.rst with 100% similarity]
Documentation/virt/kvm/vcpu-requests.rst
Documentation/virt/kvm/x86/amd-memory-encryption.rst [moved from Documentation/virt/kvm/amd-memory-encryption.rst with 99% similarity]
Documentation/virt/kvm/x86/cpuid.rst [moved from Documentation/virt/kvm/cpuid.rst with 100% similarity]
Documentation/virt/kvm/x86/errata.rst [new file with mode: 0644]
Documentation/virt/kvm/x86/halt-polling.rst [moved from Documentation/virt/kvm/halt-polling.rst with 100% similarity]
Documentation/virt/kvm/x86/hypercalls.rst [moved from Documentation/virt/kvm/hypercalls.rst with 100% similarity]
Documentation/virt/kvm/x86/index.rst [new file with mode: 0644]
Documentation/virt/kvm/x86/mmu.rst [moved from Documentation/virt/kvm/mmu.rst with 100% similarity]
Documentation/virt/kvm/x86/msr.rst [moved from Documentation/virt/kvm/msr.rst with 100% similarity]
Documentation/virt/kvm/x86/nested-vmx.rst [moved from Documentation/virt/kvm/nested-vmx.rst with 100% similarity]
Documentation/virt/kvm/x86/running-nested-guests.rst [moved from Documentation/virt/kvm/running-nested-guests.rst with 99% similarity]
Documentation/virt/kvm/x86/timekeeping.rst [moved from Documentation/virt/kvm/timekeeping.rst with 100% similarity]
Documentation/virt/uml/user_mode_linux_howto_v2.rst
Documentation/vm/page_owner.rst
Documentation/vm/unevictable-lru.rst
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/include/asm/user.h
arch/alpha/kernel/syscalls/Makefile
arch/arm/boot/compressed/Makefile
arch/arm/boot/dts/at91-kizbox3-hs.dts
arch/arm/boot/dts/at91-kizbox3_common.dtsi
arch/arm/boot/dts/at91-sam9_l9260.dts
arch/arm/boot/dts/da850-evm.dts
arch/arm/boot/dts/dm8168-evm.dts
arch/arm/boot/dts/imx28-ts4600.dts
arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
arch/arm/boot/dts/imx6ul-phytec-segin-peb-av-02.dtsi
arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
arch/arm/boot/dts/qcom-apq8064-pins.dtsi
arch/arm/boot/dts/qcom-ipq8064.dtsi
arch/arm/boot/dts/spear1310-evb.dts
arch/arm/boot/dts/spear1340-evb.dts
arch/arm/boot/dts/spear1340.dtsi
arch/arm/boot/dts/spear13xx.dtsi
arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
arch/arm/boot/dts/stm32mp157c-ev1.dts
arch/arm/configs/gemini_defconfig
arch/arm/configs/imote2_defconfig [deleted file]
arch/arm/configs/u8500_defconfig
arch/arm/include/asm/user.h
arch/arm/kernel/setup.c
arch/arm/kernel/stacktrace.c
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-ep93xx/clock.c
arch/arm/mach-iop32x/cp6.c
arch/arm/mach-omap2/omap-secure.c
arch/arm/mach-s3c/mach-jive.c
arch/arm/mach-vexpress/spc.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/mm.h
arch/arm/tools/Makefile
arch/arm64/boot/dts/amd/Makefile
arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts
arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts
arch/arm64/boot/dts/amd/amd-overdrive.dts [deleted file]
arch/arm64/boot/dts/amd/amd-seattle-cpus.dtsi [new file with mode: 0644]
arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi
arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi
arch/arm64/boot/dts/amd/husky.dts [deleted file]
arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
arch/arm64/boot/dts/nvidia/tegra186-p3509-0000+p3636-0001.dts
arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi
arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi
arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
arch/arm64/boot/dts/qcom/msm8996.dtsi
arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/el2_setup.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kernel/alternative.c
arch/arm64/kernel/elfcore.c
arch/arm64/kernel/hw_breakpoint.c
arch/arm64/kernel/module-plts.c
arch/arm64/kernel/patching.c
arch/arm64/kernel/proton-pack.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/suspend.c
arch/arm64/kvm/mmu.c
arch/arm64/kvm/psci.c
arch/arm64/kvm/reset.c
arch/arm64/kvm/vgic/vgic-debug.c
arch/arm64/kvm/vgic/vgic-its.c
arch/arm64/mm/init.c
arch/h8300/include/asm/user.h
arch/ia64/include/asm/user.h
arch/ia64/kernel/syscalls/Makefile
arch/m68k/include/asm/user.h
arch/m68k/kernel/syscalls/Makefile
arch/microblaze/boot/Makefile
arch/microblaze/boot/dts/Makefile
arch/microblaze/kernel/syscalls/Makefile
arch/mips/crypto/crc32-mips.c
arch/mips/include/asm/mach-rc32434/rb.h
arch/mips/kernel/syscalls/Makefile
arch/mips/lantiq/falcon/sysctrl.c
arch/mips/lantiq/xway/gptu.c
arch/mips/lantiq/xway/sysctrl.c
arch/mips/rb532/gpio.c
arch/mips/sgi-ip22/ip22-gio.c
arch/parisc/kernel/syscalls/Makefile
arch/powerpc/include/asm/kvm_book3s_64.h
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/include/asm/page.h
arch/powerpc/include/asm/setup.h
arch/powerpc/include/asm/static_call.h
arch/powerpc/include/asm/user.h
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/module.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/syscalls/Makefile
arch/powerpc/kvm/Kconfig
arch/powerpc/kvm/book3s_64_entry.S
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/book3s_pr_papr.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/numa.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/platforms/pseries/vas-sysfs.c
arch/riscv/Kconfig
arch/riscv/Kconfig.socs
arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
arch/riscv/configs/defconfig
arch/riscv/configs/nommu_k210_defconfig
arch/riscv/configs/nommu_k210_sdcard_defconfig
arch/riscv/configs/nommu_virt_defconfig
arch/riscv/configs/rv32_defconfig
arch/riscv/include/asm/asm.h
arch/riscv/include/asm/cpuidle.h [new file with mode: 0644]
arch/riscv/include/asm/current.h
arch/riscv/include/asm/module.lds.h
arch/riscv/include/asm/suspend.h [new file with mode: 0644]
arch/riscv/include/asm/thread_info.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/asm-offsets.c
arch/riscv/kernel/cpu.c
arch/riscv/kernel/cpu_ops_sbi.c
arch/riscv/kernel/head.S
arch/riscv/kernel/module.c
arch/riscv/kernel/perf_callchain.c
arch/riscv/kernel/process.c
arch/riscv/kernel/stacktrace.c
arch/riscv/kernel/suspend.c [new file with mode: 0644]
arch/riscv/kernel/suspend_entry.S [new file with mode: 0644]
arch/riscv/kvm/vcpu.c
arch/riscv/kvm/vcpu_fp.c
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/asm/alternative-asm.h
arch/s390/include/asm/alternative.h
arch/s390/include/asm/ap.h
arch/s390/include/asm/ctl_reg.h
arch/s390/include/asm/entry-common.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/spinlock.h
arch/s390/include/asm/stacktrace.h
arch/s390/include/asm/syscall_wrapper.h
arch/s390/include/asm/unwind.h
arch/s390/include/asm/user.h
arch/s390/kernel/entry.S
arch/s390/kernel/ipl.c
arch/s390/kernel/kprobes.c
arch/s390/kernel/machine_kexec.c
arch/s390/kernel/os_info.c
arch/s390/kernel/processor.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/kernel/syscalls/Makefile
arch/s390/kernel/traps.c
arch/s390/kernel/unwind_bc.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/pv.c
arch/s390/lib/spinlock.c
arch/s390/lib/test_unwind.c
arch/s390/pci/pci.c
arch/s390/pci/pci_bus.h
arch/s390/pci/pci_clp.c
arch/s390/pci/pci_event.c
arch/sh/include/asm/user.h
arch/sh/kernel/syscalls/Makefile
arch/sparc/kernel/syscalls/Makefile
arch/um/drivers/mconsole_kern.c
arch/um/drivers/port_user.c
arch/um/drivers/ubd_kern.c
arch/um/drivers/vector_kern.c
arch/um/drivers/vector_kern.h
arch/um/drivers/vector_user.c
arch/um/drivers/vector_user.h
arch/um/include/asm/Kbuild
arch/um/include/asm/xor.h
arch/um/include/shared/os.h
arch/um/kernel/dtb.c
arch/um/os-Linux/file.c
arch/um/os-Linux/helper.c
arch/um/os-Linux/time.c
arch/x86/Kconfig
arch/x86/configs/i386_defconfig
arch/x86/configs/x86_64_defconfig
arch/x86/entry/syscalls/Makefile
arch/x86/events/intel/core.c
arch/x86/events/intel/cstate.c
arch/x86/events/intel/uncore.c
arch/x86/events/intel/uncore_snb.c
arch/x86/events/msr.c
arch/x86/include/asm/asm.h
arch/x86/include/asm/bug.h
arch/x86/include/asm/compat.h
arch/x86/include/asm/io.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/msi.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/percpu.h
arch/x86/include/asm/perf_event.h
arch/x86/include/asm/static_call.h
arch/x86/include/asm/svm.h
arch/x86/include/asm/user_32.h
arch/x86/include/asm/user_64.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu.h
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/tsx.c
arch/x86/kernel/crash_dump_64.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/kvm.c
arch/x86/kernel/static_call.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/emulate.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/hyperv.h
arch/x86/kvm/i8254.c
arch/x86/kvm/kvm_emulate.h
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/mmu/tdp_mmu.h
arch/x86/kvm/pmu.c
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/svm/svm_onhyperv.c
arch/x86/kvm/trace.h
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/xen.c
arch/x86/lib/csum-partial_64.c
arch/x86/lib/iomem.c
arch/x86/lib/usercopy_64.c
arch/x86/mm/tlb.c
arch/x86/net/bpf_jit_comp.c
arch/x86/power/cpu.c
arch/x86/um/Kconfig
arch/x86/um/shared/sysdep/syscalls_64.h
arch/x86/um/syscalls_64.c
arch/xtensa/kernel/coprocessor.S
arch/xtensa/kernel/jump_label.c
arch/xtensa/kernel/syscalls/Makefile
arch/xtensa/platforms/iss/console.c
block/bio.c
block/blk-cgroup.c
block/blk-ioc.c
block/blk-mq.c
block/blk-wbt.h
block/genhd.c
block/ioctl.c
certs/Makefile
certs/system_certificates.S
drivers/acpi/acpi_ipmi.c
drivers/acpi/apei/apei-base.c
drivers/acpi/cppc_acpi.c
drivers/acpi/processor_idle.c
drivers/acpi/scan.c
drivers/acpi/tables.c
drivers/ata/Kconfig
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/libata-core.c
drivers/ata/libata-sff.c
drivers/ata/sata_dwc_460ex.c
drivers/auxdisplay/lcd2s.c
drivers/base/dd.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_nl.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_state.c
drivers/block/drbd/drbd_state_change.h
drivers/block/loop.c
drivers/block/n64cart.c
drivers/block/null_blk/main.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkfront.c
drivers/cdrom/cdrom.c
drivers/char/Kconfig
drivers/char/random.c
drivers/clk/clk.c
drivers/clk/clk_test.c
drivers/clk/sunxi-ng/Kconfig
drivers/clk/sunxi-ng/Makefile
drivers/clk/sunxi-ng/ccu-sun6i-rtc.c [new file with mode: 0644]
drivers/clk/sunxi-ng/ccu-sun6i-rtc.h [new file with mode: 0644]
drivers/clk/sunxi-ng/ccu_common.h
drivers/clk/sunxi-ng/ccu_mux.c
drivers/cpuidle/Kconfig
drivers/cpuidle/Kconfig.arm
drivers/cpuidle/Kconfig.riscv [new file with mode: 0644]
drivers/cpuidle/Makefile
drivers/cpuidle/cpuidle-psci-domain.c
drivers/cpuidle/cpuidle-psci.h
drivers/cpuidle/cpuidle-riscv-sbi.c [new file with mode: 0644]
drivers/cpuidle/dt_idle_genpd.c [new file with mode: 0644]
drivers/cpuidle/dt_idle_genpd.h [new file with mode: 0644]
drivers/crypto/virtio/Kconfig
drivers/crypto/virtio/Makefile
drivers/crypto/virtio/virtio_crypto_akcipher_algs.c [new file with mode: 0644]
drivers/crypto/virtio/virtio_crypto_common.h
drivers/crypto/virtio/virtio_crypto_core.c
drivers/crypto/virtio/virtio_crypto_mgr.c
drivers/crypto/virtio/virtio_crypto_skcipher_algs.c [moved from drivers/crypto/virtio/virtio_crypto_algs.c with 99% similarity]
drivers/cxl/pci.c
drivers/dma-buf/Makefile
drivers/dma-buf/dma-fence-array.c
drivers/dma-buf/selftests.h
drivers/dma-buf/st-dma-fence-unwrap.c [new file with mode: 0644]
drivers/dma-buf/sync_file.c
drivers/firmware/arm_scmi/clock.c
drivers/firmware/arm_scmi/driver.c
drivers/firmware/arm_scmi/optee.c
drivers/gpio/gpio-sim.c
drivers/gpio/gpio-ts4900.c
drivers/gpio/gpio-ts5500.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/ObjectID.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_events.c
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_stream.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
drivers/gpu/drm/drm_of.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/imx/dw_hdmi-imx.c
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/imx/parallel-display.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/msm/dp/dp_panel.c
drivers/gpu/drm/msm/dp/dp_panel.h
drivers/gpu/drm/msm/dsi/dsi_manager.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
drivers/gpu/drm/panel/panel-ilitek-ili9341.c
drivers/gpu/ipu-v3/ipu-di.c
drivers/hid/Kconfig
drivers/hid/Makefile
drivers/hid/hid-google-hammer.c
drivers/hid/hid-vivaldi-common.c [new file with mode: 0644]
drivers/hid/hid-vivaldi-common.h [new file with mode: 0644]
drivers/hid/hid-vivaldi.c
drivers/hv/channel_mgmt.c
drivers/hv/hv_balloon.c
drivers/hv/hv_common.c
drivers/hv/ring_buffer.c
drivers/hv/vmbus_drv.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-ismt.c
drivers/i2c/busses/i2c-pasemi-core.c
drivers/i2c/busses/i2c-qcom-geni.c
drivers/i2c/i2c-dev.c
drivers/infiniband/core/cm.c
drivers/infiniband/hw/hfi1/mmu_rb.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/input/Kconfig
drivers/input/Makefile
drivers/input/input.c
drivers/input/joystick/adi.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/Kconfig
drivers/input/keyboard/Makefile
drivers/input/keyboard/atkbd.c
drivers/input/keyboard/cros_ec_keyb.c
drivers/input/keyboard/mt6779-keypad.c [new file with mode: 0644]
drivers/input/keyboard/mtk-pmic-keys.c
drivers/input/misc/da9063_onkey.c
drivers/input/mouse/synaptics.c
drivers/input/serio/ps2-gpio.c
drivers/input/touchscreen/Kconfig
drivers/input/touchscreen/Makefile
drivers/input/touchscreen/goodix.c
drivers/input/touchscreen/goodix.h
drivers/input/touchscreen/imagis.c [new file with mode: 0644]
drivers/input/touchscreen/iqs5xx.c
drivers/input/touchscreen/stmfts.c
drivers/input/touchscreen/tsc200x-core.c
drivers/input/vivaldi-fmap.c [new file with mode: 0644]
drivers/iommu/omap-iommu.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-qcom-mpm.c
drivers/md/dm-core.h
drivers/md/dm-integrity.c
drivers/md/dm-ioctl.c
drivers/md/dm-ps-historical-service-time.c
drivers/md/dm-zone.c
drivers/md/dm.c
drivers/media/platform/nxp/Kconfig
drivers/media/platform/rockchip/rga/rga.c
drivers/media/tuners/si2157.c
drivers/memory/atmel-ebi.c
drivers/memory/fsl_ifc.c
drivers/memory/renesas-rpc-if.c
drivers/message/fusion/mptbase.c
drivers/misc/habanalabs/common/memory.c
drivers/mmc/core/block.c
drivers/mmc/core/core.c
drivers/mmc/core/mmc_test.c
drivers/mmc/host/mmci_stm32_sdmmc.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/sdhci-xenon.c
drivers/mtd/ubi/build.c
drivers/mtd/ubi/fastmap.c
drivers/mtd/ubi/vmt.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/lantiq_gswip.c
drivers/net/dsa/mv88e6xxx/port_hidden.c
drivers/net/dsa/ocelot/felix.c
drivers/net/dsa/ocelot/felix_vsc9959.c
drivers/net/dsa/realtek/Kconfig
drivers/net/dsa/realtek/realtek-mdio.c
drivers/net/dsa/realtek/realtek-smi.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/aquantia/atlantic/aq_vec.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/ice/ice_arfs.c
drivers/net/ethernet/intel/ice/ice_eswitch.c
drivers/net/ethernet/intel/ice/ice_eswitch.h
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_nvm.c
drivers/net/ethernet/intel/ice/ice_sriov.c
drivers/net/ethernet/intel/ice/ice_virtchnl.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/igc/igc_i225.c
drivers/net/ethernet/intel/igc/igc_phy.c
drivers/net/ethernet/intel/igc/igc_ptp.c
drivers/net/ethernet/mellanox/mlxsw/i2c.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
drivers/net/hippi/rrunner.c
drivers/net/macvlan.c
drivers/net/mdio/fwnode_mdio.c
drivers/net/phy/marvell10g.c
drivers/net/phy/microchip_t1.c
drivers/net/tun.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vxlan/vxlan_core.c
drivers/net/wan/cosa.c
drivers/net/wireguard/device.c
drivers/net/wireless/ath/ath10k/sdio.c
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/marvell/mwifiex/sdio.c
drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
drivers/net/wireless/ti/wlcore/sdio.c
drivers/nvme/host/core.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/configfs.c
drivers/nvme/target/core.c
drivers/nvme/target/fc.c
drivers/nvme/target/fcloop.c
drivers/nvme/target/io-cmd-file.c
drivers/nvme/target/loop.c
drivers/nvme/target/nvmet.h
drivers/nvme/target/passthru.c
drivers/nvme/target/rdma.c
drivers/nvme/target/tcp.c
drivers/pci/controller/pci-hyperv.c
drivers/perf/Kconfig
drivers/perf/fsl_imx8_ddr_perf.c
drivers/perf/qcom_l2_pmu.c
drivers/platform/chrome/Makefile
drivers/platform/chrome/cros_ec_debugfs.c
drivers/platform/chrome/cros_ec_sensorhub_ring.c
drivers/platform/chrome/cros_ec_sensorhub_trace.h [new file with mode: 0644]
drivers/platform/chrome/cros_ec_trace.h
drivers/platform/chrome/cros_ec_typec.c
drivers/platform/x86/acerhdf.c
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/barco-p50-gpio.c
drivers/platform/x86/samsung-laptop.c
drivers/platform/x86/think-lmi.c
drivers/platform/x86/think-lmi.h
drivers/power/supply/power_supply_core.c
drivers/power/supply/samsung-sdi-battery.c
drivers/regulator/atc260x-regulator.c
drivers/regulator/rtq2134-regulator.c
drivers/regulator/wm8994-regulator.c
drivers/reset/reset-rzg2l-usbphy-ctrl.c
drivers/reset/tegra/reset-bpmp.c
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/rtc/class.c
drivers/rtc/interface.c
drivers/rtc/rtc-ds1307.c
drivers/rtc/rtc-ds1685.c
drivers/rtc/rtc-efi.c
drivers/rtc/rtc-gamecube.c
drivers/rtc/rtc-hym8563.c
drivers/rtc/rtc-m41t80.c
drivers/rtc/rtc-mc146818-lib.c
drivers/rtc/rtc-mpc5121.c
drivers/rtc/rtc-opal.c
drivers/rtc/rtc-optee.c [new file with mode: 0644]
drivers/rtc/rtc-pcf2123.c
drivers/rtc/rtc-pcf2127.c
drivers/rtc/rtc-pcf85063.c
drivers/rtc/rtc-pcf8523.c
drivers/rtc/rtc-pcf8563.c
drivers/rtc/rtc-pl031.c
drivers/rtc/rtc-pm8xxx.c
drivers/rtc/rtc-spear.c
drivers/rtc/rtc-sun6i.c
drivers/rtc/rtc-wm8350.c
drivers/rtc/rtc-xgene.c
drivers/s390/char/sclp.c
drivers/s390/char/sclp_con.c
drivers/s390/char/sclp_vt220.c
drivers/s390/char/tape_34xx.c
drivers/s390/cio/device_fsm.c
drivers/s390/cio/eadm_sch.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/pkey_api.c
drivers/s390/crypto/vfio_ap_ops.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/crypto/zcrypt_card.c
drivers/s390/crypto/zcrypt_ep11misc.c
drivers/scsi/aha152x.c
drivers/scsi/aic7xxx/aic79xx_osm.h
drivers/scsi/aic7xxx/aic79xx_pci.c
drivers/scsi/aic7xxx/aic7xxx_osm.h
drivers/scsi/aic7xxx/aic7xxx_pci.c
drivers/scsi/bnx2fc/bnx2fc_hwi.c
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/bnx2i/bnx2i_iscsi.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/isci/host.c
drivers/scsi/libiscsi.c
drivers/scsi/libiscsi_tcp.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_config.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/mvsas/mv_init.c
drivers/scsi/pcmcia/sym53c500_cs.c
drivers/scsi/pm8001/pm80xx_hwi.c
drivers/scsi/pmcraid.c
drivers/scsi/pmcraid.h
drivers/scsi/qedi/qedi_iscsi.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_logging.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/sr.c
drivers/scsi/ufs/ufs-qcom.c
drivers/scsi/ufs/ufshcd-pci.c
drivers/scsi/ufs/ufshcd.h
drivers/scsi/ufs/ufshpb.c
drivers/scsi/virtio_scsi.c
drivers/scsi/zorro7xx.c
drivers/spi/atmel-quadspi.c
drivers/spi/spi-bcm-qspi.c
drivers/spi/spi-cadence-quadspi.c
drivers/spi/spi-intel-pci.c
drivers/spi/spi-mtk-nor.c
drivers/spi/spi-mxic.c
drivers/spi/spi-rpc-if.c
drivers/spi/spi.c
drivers/staging/r8188eu/core/rtw_br_ext.c
drivers/target/target_core_user.c
drivers/tty/serial/mpc52xx_uart.c
drivers/vdpa/ifcvf/ifcvf_base.c
drivers/vdpa/ifcvf/ifcvf_base.h
drivers/vdpa/ifcvf/ifcvf_main.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa.c
drivers/vfio/pci/vfio_pci_core.c
drivers/vhost/iotlb.c
drivers/vhost/vdpa.c
drivers/vhost/vhost.c
drivers/video/fbdev/core/fbmem.c
drivers/virt/vmgenid.c
drivers/virtio/Kconfig
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_pci_common.h
drivers/virtio/virtio_pci_legacy.c
drivers/virtio/virtio_pci_modern.c
drivers/virtio/virtio_pci_modern_dev.c
drivers/virtio/virtio_ring.c
drivers/watchdog/Kconfig
drivers/watchdog/aspeed_wdt.c
drivers/watchdog/imx2_wdt.c
drivers/watchdog/ixp4xx_wdt.c
drivers/watchdog/orion_wdt.c
drivers/watchdog/renesas_wdt.c
drivers/watchdog/rti_wdt.c
drivers/watchdog/sp5100_tco.c
drivers/watchdog/sp5100_tco.h
drivers/watchdog/watchdog_dev.c
drivers/xen/balloon.c
drivers/xen/unpopulated-alloc.c
fs/9p/cache.c
fs/9p/v9fs.c
fs/9p/v9fs.h
fs/9p/vfs_addr.c
fs/9p/vfs_inode.c
fs/afs/dynroot.c
fs/afs/file.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/super.c
fs/afs/write.c
fs/aio.c
fs/binfmt_elf.c
fs/btrfs/block-group.c
fs/btrfs/block-group.h
fs/btrfs/compression.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/reflink.c
fs/btrfs/volumes.c
fs/btrfs/zoned.c
fs/buffer.c
fs/cachefiles/io.c
fs/cachefiles/namei.c
fs/cachefiles/xattr.c
fs/ceph/addr.c
fs/ceph/cache.c
fs/ceph/cache.h
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/super.h
fs/cifs/cifs_debug.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/fscache.c
fs/cifs/fscache.h
fs/cifs/inode.c
fs/cifs/link.c
fs/cifs/misc.c
fs/cifs/netmisc.c
fs/cifs/smb2glob.h
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smb2proto.h
fs/cifs/trace.h
fs/cifs/transport.c
fs/crypto/crypto.c
fs/erofs/zdata.c
fs/erofs/zdata.h
fs/exfat/exfat_fs.h
fs/exfat/file.c
fs/exfat/namei.c
fs/exfat/super.c
fs/ext4/file.c
fs/ext4/inode.c
fs/ext4/readpage.c
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/file.c
fs/f2fs/node.c
fs/file_table.c
fs/fscache/Kconfig
fs/fscache/cache.c
fs/fscache/cookie.c
fs/fscache/internal.h
fs/fscache/io.c
fs/fuse/fuse_i.h
fs/gfs2/bmap.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/inode.c
fs/gfs2/lock_dlm.c
fs/gfs2/rgrp.c
fs/gfs2/rgrp.h
fs/gfs2/super.c
fs/internal.h
fs/io_uring.c
fs/ioctl.c
fs/iomap/buffered-io.c
fs/jffs2/build.c
fs/jffs2/fs.c
fs/jffs2/jffs2_fs_i.h
fs/jffs2/scan.c
fs/kernfs/file.c
fs/ksmbd/oplock.c
fs/ksmbd/server.c
fs/ksmbd/smb2pdu.c
fs/ksmbd/smb2pdu.h
fs/ksmbd/transport_tcp.c
fs/namei.c
fs/namespace.c
fs/netfs/Makefile
fs/netfs/buffered_read.c [new file with mode: 0644]
fs/netfs/internal.h
fs/netfs/io.c [new file with mode: 0644]
fs/netfs/main.c [new file with mode: 0644]
fs/netfs/objects.c [new file with mode: 0644]
fs/netfs/read_helper.c [deleted file]
fs/netfs/stats.c
fs/nfs/Kconfig
fs/nfs/dir.c
fs/nfs/file.c
fs/nfs/fscache.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs42xattr.c
fs/nfs/nfs4file.c
fs/nfs/nfs4proc.c
fs/nfs/unlink.c
fs/nfsd/filecache.c
fs/nfsd/nfs2acl.c
fs/nilfs2/btnode.c
fs/nilfs2/btnode.h
fs/nilfs2/btree.c
fs/nilfs2/dat.c
fs/nilfs2/gcinode.c
fs/nilfs2/inode.c
fs/nilfs2/mdt.c
fs/nilfs2/mdt.h
fs/nilfs2/nilfs.h
fs/nilfs2/page.c
fs/nilfs2/page.h
fs/nilfs2/segment.c
fs/nilfs2/super.c
fs/ntfs/aops.c
fs/ocfs2/quota_global.c
fs/ocfs2/quota_local.c
fs/pipe.c
fs/posix_acl.c
fs/proc/bootconfig.c
fs/read_write.c
fs/seq_file.c
fs/smbfs_common/smb2pdu.h
fs/stat.c
fs/sysfs/file.c
fs/ubifs/dir.c
fs/ubifs/file.c
fs/ubifs/io.c
fs/ubifs/ioctl.c
fs/ubifs/journal.c
fs/ubifs/ubifs.h
fs/unicode/Makefile
fs/verity/verify.c
fs/xattr.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_alloc.h
fs/xfs/xfs_bio_io.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_icache.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_inode_item.h
fs/xfs/xfs_linux.h
fs/xfs/xfs_log.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_priv.h
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_super.c
fs/xfs/xfs_trans.c
fs/xfs/xfs_trans_ail.c
include/acpi/acpi_bus.h
include/asm-generic/mshyperv.h
include/asm-generic/tlb.h
include/asm-generic/unaligned.h
include/dt-bindings/clock/sun6i-rtc.h [new file with mode: 0644]
include/linux/balloon_compaction.h
include/linux/blk-cgroup.h
include/linux/blk_types.h
include/linux/clk/sunxi-ng.h
include/linux/cma.h
include/linux/dma-fence-array.h
include/linux/dma-fence-chain.h
include/linux/dma-fence-unwrap.h [new file with mode: 0644]
include/linux/fs.h
include/linux/fscache.h
include/linux/fsverity.h
include/linux/ftrace.h
include/linux/gfp.h
include/linux/gpio/consumer.h
include/linux/gpio/driver.h
include/linux/input.h
include/linux/input/vivaldi-fmap.h [new file with mode: 0644]
include/linux/kernel.h
include/linux/kfence.h
include/linux/kobject.h
include/linux/kvm_host.h
include/linux/kvm_types.h
include/linux/local_lock_internal.h
include/linux/mc146818rtc.h
include/linux/mmc/core.h
include/linux/mmzone.h
include/linux/net.h
include/linux/netdevice.h
include/linux/netfs.h
include/linux/nfs_xdr.h
include/linux/nvme.h
include/linux/pagemap.h
include/linux/pci-dma-compat.h [deleted file]
include/linux/pci.h
include/linux/posix_acl_xattr.h
include/linux/rtc.h
include/linux/rtc/ds1685.h
include/linux/sbitmap.h
include/linux/sched.h
include/linux/seq_file.h
include/linux/static_call.h
include/linux/sunrpc/svc.h
include/linux/sunrpc/xprt.h
include/linux/t10-pi.h
include/linux/timex.h
include/linux/user_events.h [moved from include/uapi/linux/user_events.h with 58% similarity]
include/linux/vdpa.h
include/linux/vfio_pci_core.h
include/linux/virtio_config.h
include/linux/vmalloc.h
include/linux/xarray.h
include/net/esp.h
include/net/flow_dissector.h
include/net/ip6_tunnel.h
include/net/ip_tunnels.h
include/net/netns/ipv6.h
include/net/tcp.h
include/scsi/libiscsi.h
include/scsi/scsi_transport_iscsi.h
include/sound/core.h
include/sound/memalloc.h
include/sound/pcm.h
include/trace/events/cachefiles.h
include/trace/events/netfs.h
include/trace/events/sunrpc.h
include/trace/stages/stage1_struct_define.h [moved from include/trace/stages/stage1_defines.h with 100% similarity]
include/trace/stages/stage2_data_offsets.h [moved from include/trace/stages/stage2_defines.h with 100% similarity]
include/trace/stages/stage3_trace_output.h [moved from include/trace/stages/stage3_defines.h with 100% similarity]
include/trace/stages/stage4_event_fields.h [moved from include/trace/stages/stage4_defines.h with 100% similarity]
include/trace/stages/stage5_get_offsets.h [moved from include/trace/stages/stage5_defines.h with 100% similarity]
include/trace/stages/stage6_event_callback.h [moved from include/trace/stages/stage6_defines.h with 100% similarity]
include/trace/stages/stage7_class_define.h [moved from include/trace/stages/stage7_defines.h with 100% similarity]
include/trace/trace_custom_events.h
include/trace/trace_events.h
include/uapi/linux/io_uring.h
include/uapi/linux/loop.h
include/uapi/linux/rtc.h
include/uapi/linux/stddef.h
include/uapi/linux/vhost.h
include/uapi/linux/virtio_config.h
include/uapi/linux/virtio_crypto.h
init/Kconfig
kernel/Kconfig.preempt
kernel/Makefile
kernel/cpu.c
kernel/dma/direct.c
kernel/dma/direct.h
kernel/dma/mapping.c
kernel/entry/common.c
kernel/events/core.c
kernel/irq/affinity.c
kernel/irq_work.c
kernel/sched/core.c
kernel/sched/idle.c
kernel/sched/sched.h
kernel/signal.c
kernel/smp.c
kernel/static_call.c
kernel/static_call_inline.c [new file with mode: 0644]
kernel/time/tick-sched.c
kernel/time/timer.c
kernel/trace/Kconfig
kernel/trace/fgraph.c
kernel/trace/trace_events_user.c
kernel/watch_queue.c
lib/kobject.c
lib/logic_iomem.c
lib/lz4/lz4_decompress.c
lib/sbitmap.c
lib/test_xarray.c
lib/xarray.c
mm/balloon_compaction.c
mm/compaction.c
mm/damon/core.c
mm/filemap.c
mm/gup.c
mm/highmem.c
mm/huge_memory.c
mm/hugetlb.c
mm/internal.h
mm/kasan/hw_tags.c
mm/kasan/kasan.h
mm/kfence/core.c
mm/kfence/kfence.h
mm/kfence/report.c
mm/kmemleak.c
mm/list_lru.c
mm/madvise.c
mm/memory.c
mm/mempolicy.c
mm/migrate.c
mm/mlock.c
mm/mremap.c
mm/page_alloc.c
mm/page_io.c
mm/page_vma_mapped.c
mm/readahead.c
mm/rmap.c
mm/secretmem.c
mm/shmem.c
mm/slab.c
mm/slab.h
mm/slab_common.c
mm/slob.c
mm/slub.c
mm/swap.c
mm/vmalloc.c
net/bridge/br_switchdev.c
net/can/isotp.c
net/core/dev.c
net/core/flow_dissector.c
net/core/rtnetlink.c
net/dsa/dsa2.c
net/dsa/port.c
net/dsa/slave.c
net/dsa/tag_hellcreek.c
net/ipv4/esp4.c
net/ipv4/ip_gre.c
net/ipv4/ip_tunnel.c
net/ipv4/netfilter/nf_flow_table_ipv4.c [deleted file]
net/ipv4/syncookies.c
net/ipv4/tcp_input.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_rate.c
net/ipv6/esp6.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/netfilter.c
net/ipv6/route.c
net/ipv6/syncookies.c
net/l3mdev/l3mdev.c
net/mac80211/debugfs_sta.c
net/mctp/device.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_set_rbtree.c
net/netfilter/nft_socket.c
net/netlink/af_netlink.c
net/nfc/nci/core.c
net/openvswitch/flow_netlink.c
net/packet/af_packet.c
net/rxrpc/net_ns.c
net/sched/cls_api.c
net/sched/cls_flower.c
net/sched/cls_u32.c
net/sched/sch_taprio.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/smc/af_smc.c
net/smc/smc.h
net/smc/smc_clc.c
net/smc/smc_close.c
net/smc/smc_pnet.c
net/sunrpc/clnt.c
net/sunrpc/sched.c
net/sunrpc/socklib.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcsock.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
net/sunrpc/xprtsock.c
net/wireless/nl80211.c
net/wireless/scan.c
net/xfrm/xfrm_policy.c
scripts/Makefile.build
scripts/Makefile.clean
scripts/Makefile.lib
scripts/basic/fixdep.c
scripts/gcc-plugins/latent_entropy_plugin.c
scripts/get_abi.pl
scripts/get_feat.pl
scripts/kallsyms.c
scripts/kconfig/confdata.c
scripts/kernel-doc
scripts/link-vmlinux.sh
scripts/mod/modpost.c
security/Kconfig
sound/core/init.c
sound/core/memalloc.c
sound/core/pcm.c
sound/core/pcm_lib.c
sound/core/pcm_misc.c
sound/core/pcm_native.c
sound/drivers/mtpav.c
sound/hda/hdac_i915.c
sound/hda/intel-dsp-config.c
sound/isa/cs423x/cs4236.c
sound/isa/galaxy/galaxy.c
sound/isa/sc6000.c
sound/oss/dmasound/dmasound.h
sound/oss/dmasound/dmasound_core.c
sound/pci/ad1889.c
sound/pci/ali5451/ali5451.c
sound/pci/als300.c
sound/pci/als4000.c
sound/pci/atiixp.c
sound/pci/atiixp_modem.c
sound/pci/au88x0/au88x0.c
sound/pci/aw2/aw2-alsa.c
sound/pci/azt3328.c
sound/pci/bt87x.c
sound/pci/ca0106/ca0106_main.c
sound/pci/cmipci.c
sound/pci/cs4281.c
sound/pci/cs5535audio/cs5535audio.c
sound/pci/echoaudio/echoaudio.c
sound/pci/emu10k1/emu10k1x.c
sound/pci/ens1370.c
sound/pci/es1938.c
sound/pci/es1968.c
sound/pci/fm801.c
sound/pci/hda/patch_cs8409-tables.c
sound/pci/hda/patch_cs8409.c
sound/pci/hda/patch_cs8409.h
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/ice1712/ice1724.c
sound/pci/intel8x0.c
sound/pci/intel8x0m.c
sound/pci/korg1212/korg1212.c
sound/pci/lola/lola.c
sound/pci/lx6464es/lx6464es.c
sound/pci/maestro3.c
sound/pci/nm256/nm256.c
sound/pci/oxygen/oxygen_lib.c
sound/pci/riptide/riptide.c
sound/pci/rme32.c
sound/pci/rme96.c
sound/pci/rme9652/hdsp.c
sound/pci/rme9652/hdspm.c
sound/pci/rme9652/rme9652.c
sound/pci/sis7019.c
sound/pci/sonicvibes.c
sound/pci/via82xx.c
sound/pci/via82xx_modem.c
sound/soc/codecs/mt6358.c
sound/soc/fsl/fsl-asoc-card.c
sound/soc/rockchip/rockchip_i2s_tdm.c
sound/soc/sof/intel/Kconfig
sound/usb/pcm.c
sound/usb/usbaudio.h
sound/x86/intel_hdmi_audio.c
tools/arch/arm64/include/asm/cputype.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/msr-index.h
tools/bpf/bpftool/Makefile
tools/build/Makefile
tools/build/feature/Makefile
tools/counter/Makefile
tools/gpio/Makefile
tools/hv/Makefile
tools/iio/Makefile
tools/include/uapi/asm-generic/mman-common.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/vhost.h
tools/lib/api/Makefile
tools/lib/bpf/Makefile
tools/lib/perf/Makefile
tools/lib/perf/cpumap.c
tools/lib/perf/evlist.c
tools/lib/perf/include/internal/cpumap.h
tools/lib/perf/include/internal/evlist.h
tools/lib/subcmd/Makefile
tools/objtool/Makefile
tools/objtool/check.c
tools/pci/Makefile
tools/perf/Documentation/perf.txt
tools/perf/Makefile.config
tools/perf/Makefile.perf
tools/perf/arch/arm/util/cs-etm.c
tools/perf/arch/arm64/util/arm-spe.c
tools/perf/arch/x86/util/intel-bts.c
tools/perf/arch/x86/util/intel-pt.c
tools/perf/bench/epoll-ctl.c
tools/perf/bench/epoll-wait.c
tools/perf/bench/evlist-open-close.c
tools/perf/bench/futex-hash.c
tools/perf/bench/futex-lock-pi.c
tools/perf/bench/futex-requeue.c
tools/perf/bench/futex-wake-parallel.c
tools/perf/bench/futex-wake.c
tools/perf/bench/numa.c
tools/perf/builtin-ftrace.c
tools/perf/builtin-record.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/perf.c
tools/perf/python/tracepoint.py
tools/perf/tests/dwarf-unwind.c
tools/perf/tests/perf-time-to-tsc.c
tools/perf/trace/beauty/include/linux/socket.h
tools/perf/util/annotate.c
tools/perf/util/arm64-frame-pointer-unwind-support.c
tools/perf/util/auxtrace.c
tools/perf/util/bpf_ftrace.c
tools/perf/util/evlist.c
tools/perf/util/hashmap.c
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/machine.c
tools/perf/util/parse-events.c
tools/perf/util/record.c
tools/perf/util/session.c
tools/perf/util/setup.py
tools/perf/util/sideband_evlist.c
tools/perf/util/stat-display.c
tools/perf/util/stat.c
tools/perf/util/synthetic-events.c
tools/perf/util/top.c
tools/perf/util/unwind-libdw.c
tools/perf/util/unwind-libdw.h
tools/perf/util/unwind-libunwind-local.c
tools/perf/util/unwind-libunwind.c
tools/perf/util/unwind.h
tools/power/x86/intel-speed-select/Makefile
tools/scripts/Makefile.include
tools/scripts/utilities.mak
tools/spi/Makefile
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/drivers/net/mlxsw/spectrum-2/vxlan_flooding_ipv6.sh
tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
tools/testing/selftests/kselftest_harness.h
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/aarch64/arch_timer.c
tools/testing/selftests/kvm/aarch64/get-reg-list.c
tools/testing/selftests/kvm/aarch64/vcpu_width_config.c [new file with mode: 0644]
tools/testing/selftests/kvm/dirty_log_perf_test.c
tools/testing/selftests/kvm/include/riscv/processor.h
tools/testing/selftests/kvm/lib/riscv/processor.c
tools/testing/selftests/lib.mk
tools/testing/selftests/mqueue/mq_perf_tests.c
tools/testing/selftests/pid_namespace/Makefile
tools/testing/selftests/pidfd/pidfd_wait.c
tools/testing/selftests/proc/proc-pid-vm.c
tools/testing/selftests/vDSO/vdso_test_correctness.c
tools/testing/selftests/wireguard/qemu/arch/i686.config
tools/testing/selftests/wireguard/qemu/arch/x86_64.config
tools/testing/selftests/x86/Makefile
tools/testing/selftests/x86/amx.c
tools/tracing/rtla/Makefile
tools/usb/Makefile
tools/virtio/Makefile
tools/virtio/linux/dma-mapping.h
tools/vm/page_owner_sort.c
usr/Makefile
usr/include/Makefile
virt/kvm/kvm_main.c
virt/kvm/pfncache.c

index 8fd9b3c7a42b2b3ff4a0d449e9957a7034fd79a8..93458154ce7d45ebbd177d6d8429808474e2aa53 100644
--- a/.mailmap
+++ b/.mailmap
@@ -213,6 +213,7 @@ Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
 Keith Busch <kbusch@kernel.org> <keith.busch@intel.com>
 Keith Busch <kbusch@kernel.org> <keith.busch@linux.intel.com>
 Kenneth W Chen <kenneth.w.chen@intel.com>
+Kirill Tkhai <kirill.tkhai@openvz.org> <ktkhai@virtuozzo.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
@@ -390,6 +391,10 @@ Uwe Kleine-König <ukleinek@strlen.de>
 Uwe Kleine-König <ukl@pengutronix.de>
 Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
 Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
+Vasily Averin <vasily.averin@linux.dev> <vvs@virtuozzo.com>
+Vasily Averin <vasily.averin@linux.dev> <vvs@openvz.org>
+Vasily Averin <vasily.averin@linux.dev> <vvs@parallels.com>
+Vasily Averin <vasily.averin@linux.dev> <vvs@sw.ru>
 Vinod Koul <vkoul@kernel.org> <vinod.koul@intel.com>
 Vinod Koul <vkoul@kernel.org> <vinod.koul@linux.intel.com>
 Vinod Koul <vkoul@kernel.org> <vkoul@infradead.org>
index 05820365f1ec689147d89ab596aeab05e8d7a901..4cdba3477176fd89d8e03e67595843d6f7091aef 100644
--- a/Documentation/ABI/testing/sysfs-class-firmware-attributes
+++ b/Documentation/ABI/testing/sysfs-class-firmware-attributes
@@ -116,7 +116,7 @@ Description:
                                            <value>[ForceIf:<attribute>=<value>]
                                            <value>[ForceIfNot:<attribute>=<value>]
 
-                                       For example:
+                                       For example::
 
                                            LegacyOrom/dell_value_modifier has value:
                                                    Disabled[ForceIf:SecureBoot=Enabled]
@@ -212,7 +212,7 @@ Description:
                the next boot.
 
                Lenovo specific class extensions
-               ------------------------------
+               --------------------------------
 
                On Lenovo systems the following additional settings are available:
 
@@ -246,9 +246,7 @@ Description:
                                        that is being referenced (e.g hdd0, hdd1 etc)
                                        This attribute defaults to device 0.
 
-               certificate:
-               signature:
-               save_signature:
+               certificate, signature, save_signature:
                                        These attributes are used for certificate based authentication. This is
                                        used in conjunction with a signing server as an alternative to password
                                        based authentication.
@@ -257,22 +255,27 @@ Description:
                                        The attributes can be displayed to check the stored value.
 
                                        Some usage examples:
-                                       Installing a certificate to enable feature:
-                                               echo <supervisor password > authentication/Admin/current_password
-                                               echo <signed certificate> > authentication/Admin/certificate
 
-                                       Updating the installed certificate:
-                                               echo <signature> > authentication/Admin/signature
-                                               echo <signed certificate> > authentication/Admin/certificate
+                                               Installing a certificate to enable feature::
+
+                                                       echo "supervisor password" > authentication/Admin/current_password
+                                                       echo "signed certificate" > authentication/Admin/certificate
+
+                                               Updating the installed certificate::
+
+                                                       echo "signature" > authentication/Admin/signature
+                                                       echo "signed certificate" > authentication/Admin/certificate
 
-                                       Removing the installed certificate:
-                                               echo <signature> > authentication/Admin/signature
-                                               echo '' > authentication/Admin/certificate
+                                               Removing the installed certificate::
 
-                                       Changing a BIOS setting:
-                                               echo <signature> > authentication/Admin/signature
-                                               echo <save signature> > authentication/Admin/save_signature
-                                               echo Enable > attribute/PasswordBeep/current_value
+                                                       echo "signature" > authentication/Admin/signature
+                                                       echo "" > authentication/Admin/certificate
+
+                                               Changing a BIOS setting::
+
+                                                       echo "signature" > authentication/Admin/signature
+                                                       echo "save signature" > authentication/Admin/save_signature
+                                                       echo Enable > attribute/PasswordBeep/current_value
 
                                        You cannot enable certificate authentication if a supervisor password
                                        has not been set.
@@ -288,9 +291,10 @@ Description:
                certificate_to_password:
                                        Write only attribute used to switch from certificate based authentication
                                        back to password based.
-                                       Usage:
-                                               echo <signature> > authentication/Admin/signature
-                                               echo <password> > authentication/Admin/certificate_to_password
+                                       Usage::
+
+                                               echo "signature" > authentication/Admin/signature
+                                               echo "password" > authentication/Admin/certificate_to_password
 
 
 What:          /sys/class/firmware-attributes/*/attributes/pending_reboot
@@ -345,7 +349,7 @@ Description:
 
                    # echo "factory" > /sys/class/firmware-attributes/*/device/attributes/reset_bios
                    # cat /sys/class/firmware-attributes/*/device/attributes/reset_bios
-                   builtinsafe lastknowngood [factory] custom
+                   builtinsafe lastknowngood [factory] custom
 
                Note that any changes to this attribute requires a reboot
                for changes to take effect.
index ab122125ff9aed4f46089fd329deed3193b0ac4f..96b92c105ec49d9b47edc351195d2678c90324e1 100644 (file)
@@ -13,17 +13,19 @@ Description:
                Should the operation fail, one of the following error codes
                may be returned:
 
+               ==========      =====
                Error Code      Cause
-               ----------      -----
-               EIO             General mailbox failure. Log may indicate cause.
-               EBUSY           Mailbox is owned by another agent.
-               EPERM           SDSI capability is not enabled in hardware.
-               EPROTO          Failure in mailbox protocol detected by driver.
+               ==========      =====
+               EIO             General mailbox failure. Log may indicate cause.
+               EBUSY           Mailbox is owned by another agent.
+               EPERM           SDSI capability is not enabled in hardware.
+               EPROTO          Failure in mailbox protocol detected by driver.
                                See log for details.
-               EOVERFLOW       For provision commands, the size of the data
+               EOVERFLOW       For provision commands, the size of the data
                                exceeds what may be written.
-               ESPIPE          Seeking is not allowed.
-               ETIMEDOUT       Failure to complete mailbox transaction in time.
+               ESPIPE          Seeking is not allowed.
+               ETIMEDOUT       Failure to complete mailbox transaction in time.
+               ==========      =====
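
               A hedged userspace sketch of acting on these codes; the
               attribute name and instance number below are placeholders,
               not part of this ABI description::

                 #include <errno.h>
                 #include <fcntl.h>
                 #include <stdio.h>
                 #include <unistd.h>

                 int main(void)
                 {
                         char buf[4096];
                         /* Placeholder path; substitute the real attribute. */
                         int fd = open("/sys/bus/auxiliary/devices/"
                                       "intel_vsec.sdsi.0/registers", O_RDONLY);

                         if (fd < 0 || read(fd, buf, sizeof(buf)) < 0) {
                                 switch (errno) {
                                 case EBUSY:
                                         fprintf(stderr, "mailbox owned by another agent\n");
                                         break;
                                 case EPERM:
                                         fprintf(stderr, "SDSI not enabled in hardware\n");
                                         break;
                                 default:
                                         perror("sdsi");
                                 }
                                 return 1;
                         }
                         close(fd);
                         return 0;
                 }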
 
 What:          /sys/bus/auxiliary/devices/intel_vsec.sdsi.X/guid
 Date:          Feb 2022
index 05482374a741bd100c62a062a3686e8811199526..bb4681a01811603caad50466790d1bd4794c98f0 100644 (file)
@@ -9,8 +9,9 @@ Description:    Shows all enabled kernel features.
 What:          /sys/fs/erofs/<disk>/sync_decompress
 Date:          November 2021
 Contact:       "Huang Jianan" <huangjianan@oppo.com>
-Description:   Control strategy of sync decompression
+Description:   Control strategy of sync decompression:
+
                - 0 (default, auto): enable for readpage, and enable for
-                                    readahead on atomic contexts only,
+                 readahead on atomic contexts only.
                - 1 (force on): enable for readpage and readahead.
                - 2 (force off): disable for all situations.
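
               A minimal userspace sketch, assuming a hypothetical disk named
               "sdX", that forces strategy 1 ("force on")::

                 #include <fcntl.h>
                 #include <stdio.h>
                 #include <unistd.h>

                 int main(void)
                 {
                         /* "sdX" is a placeholder; "1" selects force-on. */
                         int fd = open("/sys/fs/erofs/sdX/sync_decompress",
                                       O_WRONLY);

                         if (fd < 0 || write(fd, "1", 1) != 1) {
                                 perror("sync_decompress");
                                 return 1;
                         }
                         close(fd);
                         return 0;
                 }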
index 87c6f4a6ca32bcfbe6864f24d55303d05f11e6f6..67a850b5561732f50e0d7b9a873c0fbda79947bd 100644 (file)
@@ -278,20 +278,20 @@ appropriate parameters.  In general this allows more efficient DMA
 on systems where System RAM exists above 4G _physical_ address.
 
 Drivers for all PCI-X and PCIe compliant devices must call
-pci_set_dma_mask() as they are 64-bit DMA devices.
+dma_set_mask() as they are 64-bit DMA devices.
 
 Similarly, drivers must also "register" this capability if the device
-can directly address "consistent memory" in System RAM above 4G physical
-address by calling pci_set_consistent_dma_mask().
+can directly address "coherent memory" in System RAM above 4G physical
+address by calling dma_set_coherent_mask().
 Again, this includes drivers for all PCI-X and PCIe compliant devices.
 Many 64-bit "PCI" devices (before PCI-X) and some PCI-X devices are
 64-bit DMA capable for payload ("streaming") data but not control
-("consistent") data.
+("coherent") data.
 
 
 Setup shared control data
 -------------------------
-Once the DMA masks are set, the driver can allocate "consistent" (a.k.a. shared)
+Once the DMA masks are set, the driver can allocate "coherent" (a.k.a. shared)
 memory.  See Documentation/core-api/dma-api.rst for a full description of
 the DMA APIs. This section is just a reminder that it needs to be done
 before enabling DMA on the device.
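
As a rough sketch of the mask setup just described ("mydev" is a made-up
driver, not an in-tree one), a probe routine might do::

  #include <linux/dma-mapping.h>
  #include <linux/pci.h>

  static int mydev_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  {
          int err;

          /* Streaming ("payload") DMA mask: full 64-bit. */
          err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
          if (err)
                  return err;

          /*
           * Coherent (control data) mask; narrower here, as for the
           * 64-bit-payload-only devices mentioned above.
           */
          err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
          if (err)
                  return err;

          return 0;
  }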
@@ -367,7 +367,7 @@ steps need to be performed:
   - Disable the device from generating IRQs
   - Release the IRQ (free_irq())
   - Stop all DMA activity
-  - Release DMA buffers (both streaming and consistent)
+  - Release DMA buffers (both streaming and coherent)
   - Unregister from other subsystems (e.g. scsi or netdev)
   - Disable device from responding to MMIO/IO Port addresses
   - Release MMIO/IO Port resource(s)
@@ -420,7 +420,7 @@ Once DMA is stopped, clean up streaming DMA first.
 I.e. unmap data buffers and return buffers to "upstream"
 owners if there is one.
 
-Then clean up "consistent" buffers which contain the control data.
+Then clean up "coherent" buffers which contain the control data.
 
 See Documentation/core-api/dma-api.rst for details on unmapping interfaces.
 
index b7ccaa2ea86739dc3bc8da8971de8ac5ddd46e92..3f1cc5e317ed4a5ad001082c9c589b6008f68db9 100644 (file)
                        fully seed the kernel's CRNG. Default is controlled
                        by CONFIG_RANDOM_TRUST_CPU.
 
+       random.trust_bootloader={on,off}
+                       [KNL] Enable or disable trusting the use of a
+                       seed passed by the bootloader (if available) to
+                       fully seed the kernel's CRNG. Default is controlled
+                       by CONFIG_RANDOM_TRUST_BOOTLOADER.
+
        randomize_kstack_offset=
                        [KNL] Enable or disable kernel stack offset
                        randomization, which provides roughly 5 bits of
index a137a0e6d0682038b9b578489937834e9c2b5e49..77e0ece2b1d6f8e632e7d28d17fd1c60fcf0b5c4 100644 (file)
@@ -315,11 +315,15 @@ indeed the normal API is implemented in terms of the advanced API.  The
 advanced API is only available to modules with a GPL-compatible license.
 
 The advanced API is based around the xa_state.  This is an opaque data
-structure which you declare on the stack using the XA_STATE()
-macro.  This macro initialises the xa_state ready to start walking
-around the XArray.  It is used as a cursor to maintain the position
-in the XArray and let you compose various operations together without
-having to restart from the top every time.
+structure which you declare on the stack using the XA_STATE() macro.
+This macro initialises the xa_state ready to start walking around the
+XArray.  It is used as a cursor to maintain the position in the XArray
+and let you compose various operations together without having to restart
+from the top every time.  The contents of the xa_state are protected by
+the rcu_read_lock() or the xas_lock().  If you need to drop whichever of
+those locks is protecting your state and tree, you must call xas_pause()
+so that future calls do not rely on the parts of the state which were
+left unprotected.
 
 The xa_state is also used to store errors.  You can call
 xas_error() to retrieve the error.  All operations check whether
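
A sketch of the cursor-plus-xas_pause() pattern just described, assuming a
hypothetical helper that walks an existing XArray::

  #include <linux/sched.h>
  #include <linux/xarray.h>

  static unsigned long count_entries(struct xarray *xa)
  {
          XA_STATE(xas, xa, 0);
          unsigned long count = 0;
          void *entry;

          rcu_read_lock();
          xas_for_each(&xas, entry, ULONG_MAX) {
                  count++;
                  if (need_resched()) {
                          /*
                           * About to drop the lock protecting the state:
                           * pause so later calls restart safely.
                           */
                          xas_pause(&xas);
                          rcu_read_unlock();
                          cond_resched();
                          rcu_read_lock();
                  }
          }
          rcu_read_unlock();
          return count;
  }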
index aa2cea821e25cf7cfb64fe2429c8ce0332a72c94..ff9c85a0bff21da14b477e3e3fafd487e1b25a5f 100644 (file)
@@ -26,10 +26,7 @@ The fundamental unit in KUnit is the test case. The KUnit test cases are
 grouped into KUnit suites. A KUnit test case is a function with type
 signature ``void (*)(struct kunit *test)``.
 These test case functions are wrapped in a struct called
-``struct kunit_case``. For code, see:
-
-.. kernel-doc:: include/kunit/test.h
-       :identifiers: kunit_case
+struct kunit_case.
 
 .. note:
        ``generate_params`` is optional for non-parameterized tests.
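
A hedged sketch of a trivial case and its struct kunit_case wrapping (all
names here are illustrative)::

  #include <kunit/test.h>

  static void example_add_test(struct kunit *test)
  {
          KUNIT_EXPECT_EQ(test, 1 + 2, 3);
  }

  /* Each case is wrapped in struct kunit_case; a suite groups the cases. */
  static struct kunit_case example_test_cases[] = {
          KUNIT_CASE(example_add_test),
          {}
  };

  static struct kunit_suite example_test_suite = {
          .name = "example",
          .test_cases = example_test_cases,
  };
  kunit_test_suite(example_test_suite);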
@@ -152,18 +149,12 @@ Parameterized Tests
 Each KUnit parameterized test is associated with a collection of
 parameters. The test is invoked multiple times, once for each parameter
 value and the parameter is stored in the ``param_value`` field.
-The test case includes a ``KUNIT_CASE_PARAM()`` macro that accepts a
+The test case includes a KUNIT_CASE_PARAM() macro that accepts a
 generator function.
 The generator function is passed the previous parameter and returns the next
 parameter. It also provides a macro to generate common-case generators based on
 arrays.
 
-For code, see:
-
-.. kernel-doc:: include/kunit/test.h
-       :identifiers: KUNIT_ARRAY_PARAM
-
-
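
As an illustrative sketch (names are hypothetical), an array-backed generator
wired to a parameterized case might look like::

  #include <kunit/test.h>

  static const int example_params[] = { 1, 2, 3 };

  /* Expands to an example_gen_params() generator over the array. */
  KUNIT_ARRAY_PARAM(example, example_params, NULL);

  static void example_param_test(struct kunit *test)
  {
          const int *param = test->param_value;

          KUNIT_EXPECT_GT(test, *param, 0);
  }

  static struct kunit_case example_param_cases[] = {
          KUNIT_CASE_PARAM(example_param_test, example_gen_params),
          {}
  };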
 kunit_tool (Command Line Test Harness)
 ======================================
 
index ad168d16968f5244c23798fc8467fe898cef8cee..867a4bba6bf69c7ccd4999129e595d3cb51f10a0 100644 (file)
@@ -41,13 +41,18 @@ or ``VFAT_FS``. To run ``FAT_KUNIT_TEST``, the ``.kunitconfig`` has:
        CONFIG_MSDOS_FS=y
        CONFIG_FAT_KUNIT_TEST=y
 
-1. A good starting point for the ``.kunitconfig``, is the KUnit default
-   config. Run the command:
+1. A good starting point for the ``.kunitconfig`` is the KUnit default config.
+   You can generate it by running:
 
 .. code-block:: bash
 
        cd $PATH_TO_LINUX_REPO
-       cp tools/testing/kunit/configs/default.config .kunitconfig
+       tools/testing/kunit/kunit.py config
+       cat .kunit/.kunitconfig
+
+.. note ::
+   ``.kunitconfig`` lives in the ``--build_dir`` used by kunit.py, which is
+   ``.kunit`` by default.
 
 .. note ::
    You may want to remove CONFIG_KUNIT_ALL_TESTS from the ``.kunitconfig`` as
index 6ce0b212ec6d6447f4ef037ec2588278d72d0ef4..606b4b1b709da89c6c28f346b6b194c444c146d7 100644 (file)
@@ -81,4 +81,4 @@ Example:
                };
        };
 
-[1]. Documentation/devicetree/bindings/arm/idle-states.yaml
+[1]. Documentation/devicetree/bindings/cpu/idle-states.yaml
index 8b77cf83a095a3a5553c107a57ef0ab0093e4b57..dd83ef278af03b7c4bb4c29d075c901324debb2c 100644 (file)
@@ -101,7 +101,7 @@ properties:
       bindings in [1]) must specify this property.
 
       [1] Kernel documentation - ARM idle states bindings
-        Documentation/devicetree/bindings/arm/idle-states.yaml
+        Documentation/devicetree/bindings/cpu/idle-states.yaml
 
 patternProperties:
   "^power-domain-":
index 0afec83cc72327c799106c444f03b72a358fd8b1..564ae6aaccf762a6d6f9bab099eed7347769583a 100644 (file)
@@ -13,7 +13,6 @@ maintainers:
 properties:
   compatible:
     enum:
-      - nvidia,tegra20-pmc
       - nvidia,tegra20-pmc
       - nvidia,tegra30-pmc
       - nvidia,tegra114-pmc
index bd40213302dadeffde7648e9b43c79681b6e3fbc..fced4082b047ba5b54fdf459dbac4cfe0940b731 100644 (file)
@@ -34,7 +34,6 @@ properties:
     oneOf:
       - items:
           - enum:
-              - ti,sysc-omap2
               - ti,sysc-omap2
               - ti,sysc-omap4
               - ti,sysc-omap4-simple
index f14f1d39da3629d81e95ac2377bea8233afdd8ab..d819dfaafff9b3baa5385313ab7bda403067a62e 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos SoC Audio SubSystem clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 4e8062860986aa5726b585bd3ce097cbab4cdffc..0589a63e273a126935eaffb54b0fffd6605380a6 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos SoC clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 64d027dbe3b250585c1a27e942993fabdcf628fb..c98eff64f2b58de0051f32e67f2a02d82c70a32a 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung SoC external/osc/XXTI/XusbXTI clock
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 1ed64add4355d34819275eafea95440c03868b07..b644bbd0df3841bbc0309f1914ee276ceb567fb8 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos4412 SoC ISP clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index a3fac5c6809d20eeaeff5f4d0d2a97afeaa0d70f..b05f83533e3deb4d0f396123eebd1a4150379a99 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos5260 SoC clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 032862e9f55b71ec8c29a62d8aba935f01531ada..b737c9d35a1c2af70979bf1896a90d9c14894cff 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos5410 SoC clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index edd1b4ac433476a80bfbc23657bc43619e975b37..3f9326e09f79ef55af1178a7bdad64693a1b8202 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos5433 SoC clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 599baf0b7231888b568c063461d5fb27ae272560..c137c6744ef91fdf4e5658d1a1cc677ad68bab0f 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos7 SoC clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 7e5a9cac2fd2823decfd8328cac329464e53f3af..5073e569a47fd54d65081bfb62299c749eab14a2 100644 (file)
@@ -9,7 +9,7 @@ title: Samsung Exynos7885 SoC clock controller
 maintainers:
   - Dávid Virág <virag.david003@gmail.com>
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 80ba60838f2badc08c75edda51540955d0a7bd2d..aa11815ad3a308fbbba4686177f1e4acdc9909c9 100644 (file)
@@ -9,7 +9,7 @@ title: Samsung Exynos850 SoC clock controller
 maintainers:
   - Sam Protsenko <semen.protsenko@linaro.org>
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 1410c51e0e7df84e422fd54a75bcb3b05ace7b69..9248bfc16d484a12b2a8fb37ab87f11a15833a42 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2M and S5M family clock generator block
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index ae8f8fc932338194b5119d9a9b9606bc715460b4..2659854ea1c0aeeb915e3ab295f3dca95f935d00 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung S5Pv210 SoC Audio SubSystem clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index dcb29a2d11599b1b87c231e3adf105cd0eaf28e3..67a33665cf00bfe88d965f067a5348a262612fd8 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung S5P6442/S5PC110/S5PV210 SoC clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
similarity index 74%
rename from Documentation/devicetree/bindings/arm/idle-states.yaml
rename to Documentation/devicetree/bindings/cpu/idle-states.yaml
index 4d381fa1ee5768f62715d6f033b2c25e2e42fb5c..fa4d4142ac93737bf1e9bb12206d533f8d0a74c1 100644 (file)
@@ -1,25 +1,30 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/arm/idle-states.yaml#
+$id: http://devicetree.org/schemas/cpu/idle-states.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title: ARM idle states binding description
+title: Idle states binding description
 
 maintainers:
   - Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+  - Anup Patel <anup@brainfault.org>
 
 description: |+
   ==========================================
   1 - Introduction
   ==========================================
 
-  ARM systems contain HW capable of managing power consumption dynamically,
-  where cores can be put in different low-power states (ranging from simple wfi
-  to power gating) according to OS PM policies. The CPU states representing the
-  range of dynamic idle states that a processor can enter at run-time, can be
-  specified through device tree bindings representing the parameters required to
-  enter/exit specific idle states on a given processor.
+  ARM and RISC-V systems contain HW capable of managing power consumption
+  dynamically, where cores can be put in different low-power states (ranging
+  from simple wfi to power gating) according to OS PM policies. The CPU states
+  representing the range of dynamic idle states that a processor can enter at
+  run-time, can be specified through device tree bindings representing the
+  parameters required to enter/exit specific idle states on a given processor.
+
+  ==========================================
+  2 - ARM idle states
+  ==========================================
 
   According to the Server Base System Architecture document (SBSA, [3]), the
   power states an ARM CPU can be put into are identified by the following list:
@@ -43,8 +48,23 @@ description: |+
   The device tree binding definition for ARM idle states is the subject of this
   document.
 
+  ==========================================
+  3 - RISC-V idle states
+  ==========================================
+
+  On RISC-V systems, the HARTs (or CPUs) [6] can be put in platform-specific
+  suspend (or idle) states (ranging from simple WFI to power gating). The
+  RISC-V SBI v0.3 (or higher) [7] hart state management extension provides a
+  standard mechanism for the OS to request HART state transitions.
+
+  The platform-specific suspend (or idle) states of a hart can be either
+  retentive or non-retentive in nature. A retentive suspend state will
+  preserve HART registers and CSR values for all privilege modes whereas
+  a non-retentive suspend state will not preserve HART registers and CSR
+  values.
+
   ===========================================
-  2 - idle-states definitions
+  4 - idle-states definitions
   ===========================================
 
   Idle states are characterized for a specific system through a set of
@@ -211,10 +231,10 @@ description: |+
   properties specification that is the subject of the following sections.
 
   ===========================================
-  3 - idle-states node
+  5 - idle-states node
   ===========================================
 
-  ARM processor idle states are defined within the idle-states node, which is
+  The processor idle states are defined within the idle-states node, which is
   a direct child of the cpus node [1] and provides a container where the
   processor idle states, defined as device tree nodes, are listed.
 
@@ -223,7 +243,7 @@ description: |+
   just supports idle_standby, an idle-states node is not required.
 
   ===========================================
-  4 - References
+  6 - References
   ===========================================
 
   [1] ARM Linux Kernel documentation - CPUs bindings
@@ -238,9 +258,15 @@ description: |+
   [4] ARM Architecture Reference Manuals
       http://infocenter.arm.com/help/index.jsp
 
-  [6] ARM Linux Kernel documentation - Booting AArch64 Linux
+  [5] ARM Linux Kernel documentation - Booting AArch64 Linux
       Documentation/arm64/booting.rst
 
+  [6] RISC-V Linux Kernel documentation - CPUs bindings
+      Documentation/devicetree/bindings/riscv/cpus.yaml
+
+  [7] RISC-V Supervisor Binary Interface (SBI)
+      http://github.com/riscv/riscv-sbi-doc/riscv-sbi.adoc
+
 properties:
   $nodename:
     const: idle-states
@@ -253,7 +279,7 @@ properties:
       On ARM 32-bit systems this property is optional
 
       This assumes that the "enable-method" property is set to "psci" in the cpu
-      node[6] that is responsible for setting up CPU idle management in the OS
+      node[5] that is responsible for setting up CPU idle management in the OS
       implementation.
     const: psci
 
@@ -265,8 +291,8 @@ patternProperties:
       as follows.
 
       The idle state entered by executing the wfi instruction (idle_standby
-      SBSA,[3][4]) is considered standard on all ARM platforms and therefore
-      must not be listed.
+      SBSA,[3][4]) is considered standard on all ARM and RISC-V platforms and
+      therefore must not be listed.
 
       In addition to the properties listed above, a state node may require
       additional properties specific to the entry-method defined in the
@@ -275,7 +301,27 @@ patternProperties:
 
     properties:
       compatible:
-        const: arm,idle-state
+        enum:
+          - arm,idle-state
+          - riscv,idle-state
+
+      arm,psci-suspend-param:
+        $ref: /schemas/types.yaml#/definitions/uint32
+        description: |
+          power_state parameter to pass to the ARM PSCI suspend call.
+
+          Device tree nodes that require usage of PSCI CPU_SUSPEND function
+          (i.e. idle states node with entry-method property is set to "psci")
+          must specify this property.
+
+      riscv,sbi-suspend-param:
+        $ref: /schemas/types.yaml#/definitions/uint32
+        description: |
+          suspend_type parameter to pass to the RISC-V SBI HSM suspend call.
+
+          This property is required in idle state nodes of device tree meant
+          for RISC-V systems. For more details on the suspend_type parameter,
+          refer to the SBI specification v0.3 (or higher) [7].
 
       local-timer-stop:
         description:
@@ -317,6 +363,8 @@ patternProperties:
         description:
           A string used as a descriptive name for the idle state.
 
+    additionalProperties: false
+
     required:
       - compatible
       - entry-latency-us
@@ -658,4 +706,150 @@ examples:
         };
     };
 
+  - |
+    // Example 3 (RISC-V 64-bit, 4-cpu systems, two clusters):
+
+    cpus {
+        #size-cells = <0>;
+        #address-cells = <1>;
+
+        cpu@0 {
+            device_type = "cpu";
+            compatible = "riscv";
+            reg = <0x0>;
+            riscv,isa = "rv64imafdc";
+            mmu-type = "riscv,sv48";
+            cpu-idle-states = <&CPU_RET_0_0>, <&CPU_NONRET_0_0>,
+                            <&CLUSTER_RET_0>, <&CLUSTER_NONRET_0>;
+
+            cpu_intc0: interrupt-controller {
+                #interrupt-cells = <1>;
+                compatible = "riscv,cpu-intc";
+                interrupt-controller;
+            };
+        };
+
+        cpu@1 {
+            device_type = "cpu";
+            compatible = "riscv";
+            reg = <0x1>;
+            riscv,isa = "rv64imafdc";
+            mmu-type = "riscv,sv48";
+            cpu-idle-states = <&CPU_RET_0_0>, <&CPU_NONRET_0_0>,
+                            <&CLUSTER_RET_0>, <&CLUSTER_NONRET_0>;
+
+            cpu_intc1: interrupt-controller {
+                #interrupt-cells = <1>;
+                compatible = "riscv,cpu-intc";
+                interrupt-controller;
+            };
+        };
+
+        cpu@10 {
+            device_type = "cpu";
+            compatible = "riscv";
+            reg = <0x10>;
+            riscv,isa = "rv64imafdc";
+            mmu-type = "riscv,sv48";
+            cpu-idle-states = <&CPU_RET_1_0>, <&CPU_NONRET_1_0>,
+                            <&CLUSTER_RET_1>, <&CLUSTER_NONRET_1>;
+
+            cpu_intc10: interrupt-controller {
+                #interrupt-cells = <1>;
+                compatible = "riscv,cpu-intc";
+                interrupt-controller;
+            };
+        };
+
+        cpu@11 {
+            device_type = "cpu";
+            compatible = "riscv";
+            reg = <0x11>;
+            riscv,isa = "rv64imafdc";
+            mmu-type = "riscv,sv48";
+            cpu-idle-states = <&CPU_RET_1_0>, <&CPU_NONRET_1_0>,
+                            <&CLUSTER_RET_1>, <&CLUSTER_NONRET_1>;
+
+            cpu_intc11: interrupt-controller {
+                #interrupt-cells = <1>;
+                compatible = "riscv,cpu-intc";
+                interrupt-controller;
+            };
+        };
+
+        idle-states {
+            CPU_RET_0_0: cpu-retentive-0-0 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x10000000>;
+                entry-latency-us = <20>;
+                exit-latency-us = <40>;
+                min-residency-us = <80>;
+            };
+
+            CPU_NONRET_0_0: cpu-nonretentive-0-0 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x90000000>;
+                entry-latency-us = <250>;
+                exit-latency-us = <500>;
+                min-residency-us = <950>;
+            };
+
+            CLUSTER_RET_0: cluster-retentive-0 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x11000000>;
+                local-timer-stop;
+                entry-latency-us = <50>;
+                exit-latency-us = <100>;
+                min-residency-us = <250>;
+                wakeup-latency-us = <130>;
+            };
+
+            CLUSTER_NONRET_0: cluster-nonretentive-0 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x91000000>;
+                local-timer-stop;
+                entry-latency-us = <600>;
+                exit-latency-us = <1100>;
+                min-residency-us = <2700>;
+                wakeup-latency-us = <1500>;
+            };
+
+            CPU_RET_1_0: cpu-retentive-1-0 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x10000010>;
+                entry-latency-us = <20>;
+                exit-latency-us = <40>;
+                min-residency-us = <80>;
+            };
+
+            CPU_NONRET_1_0: cpu-nonretentive-1-0 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x90000010>;
+                entry-latency-us = <250>;
+                exit-latency-us = <500>;
+                min-residency-us = <950>;
+            };
+
+            CLUSTER_RET_1: cluster-retentive-1 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x11000010>;
+                local-timer-stop;
+                entry-latency-us = <50>;
+                exit-latency-us = <100>;
+                min-residency-us = <250>;
+                wakeup-latency-us = <130>;
+            };
+
+            CLUSTER_NONRET_1: cluster-nonretentive-1 {
+                compatible = "riscv,idle-state";
+                riscv,sbi-suspend-param = <0x91000010>;
+                local-timer-stop;
+                entry-latency-us = <600>;
+                exit-latency-us = <1100>;
+                min-residency-us = <2700>;
+                wakeup-latency-us = <1500>;
+            };
+        };
+    };
+
 ...
index d318fccf78f109631507c130c649eb4593ebaacb..2bdd05af6079bcc3343fd71f374ae663ed0b8552 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos NoC (Network on Chip) Probe
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   The Samsung Exynos542x SoC has a NoC (Network on Chip) Probe for NoC bus.
index c9a8cb5fd55582fbd0c35d5ddf5ff9960b21eb24..e300df4b47f3df4da7123b8a399b5f2a2db2e789 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos SoC PPMU (Platform Performance Monitoring Unit)
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   The Samsung Exynos SoC has PPMU (Platform Performance Monitoring Unit) for
index 62c3bd4cb28d89f36d994fce9dd58292c68ec801..7257fd0ae4da87151f909d60d99f12cc93381b28 100644 (file)
@@ -51,7 +51,6 @@ properties:
           Video port for MIPI DPI output (panel or connector).
 
     required:
-      - port@0
       - port@1
 
 required:
index 5216c27fc0ada58881235761037162c5873ed0c6..a412a1da950fb9e5aeb33da06ea1733d0324ec58 100644 (file)
@@ -39,7 +39,6 @@ properties:
           Video port for MIPI DPI output (panel or connector).
 
     required:
-      - port@0
       - port@1
 
 required:
index d31483a78eab0ff7f7c888420c61333d0d7c0b37..6fb7e321f011873e2ce4c48b688d653c4b7a8cea 100644 (file)
@@ -160,7 +160,7 @@ examples:
     mdss: mdss@5e00000 {
         #address-cells = <1>;
         #size-cells = <1>;
-        compatible = "qcom,qcm2290-mdss", "qcom,mdss";
+        compatible = "qcom,qcm2290-mdss";
         reg = <0x05e00000 0x1000>;
         reg-names = "mdss";
         power-domains = <&dispcc MDSS_GDSC>;
@@ -180,7 +180,7 @@ examples:
                  <&apps_smmu 0x421 0x0>;
         ranges;
 
-        mdss_mdp: mdp@5e01000 {
+        mdss_mdp: display-controller@5e01000 {
                 compatible = "qcom,qcm2290-dpu";
                 reg = <0x05e01000 0x8f000>,
                       <0x05eb0000 0x2008>;
index f29789994b1804f46e25f888e2b845778eb6e475..c2df8d28aaf5f594b6e1fac2e277ecd161b6f581 100644 (file)
@@ -83,6 +83,8 @@ properties:
 required:
   - compatible
   - reg
+  - width-mm
+  - height-mm
   - panel-timing
 
 unevaluatedProperties: false
index 9bf592dc3033aa3efc90e9ddb721ebca5200c846..7749de95ee405f7d35af13b6c6034b8db95ceb0e 100644 (file)
@@ -71,78 +71,72 @@ properties:
 
   hfront-porch:
     description: Horizontal front porch panel timing
+    $ref: /schemas/types.yaml#/definitions/uint32-array
     oneOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-        maxItems: 1
+      - maxItems: 1
         items:
           description: typical number of pixels
-      - $ref: /schemas/types.yaml#/definitions/uint32-array
-        minItems: 3
+      - minItems: 3
         maxItems: 3
         items:
           description: min, typ, max number of pixels
 
   hback-porch:
     description: Horizontal back porch timing
+    $ref: /schemas/types.yaml#/definitions/uint32-array
     oneOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-        maxItems: 1
+      - maxItems: 1
         items:
           description: typical number of pixels
-      - $ref: /schemas/types.yaml#/definitions/uint32-array
-        minItems: 3
+      - minItems: 3
         maxItems: 3
         items:
           description: min, typ, max number of pixels
 
   hsync-len:
     description: Horizontal sync length panel timing
+    $ref: /schemas/types.yaml#/definitions/uint32-array
     oneOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-        maxItems: 1
+      - maxItems: 1
         items:
           description: typical number of pixels
-      - $ref: /schemas/types.yaml#/definitions/uint32-array
-        minItems: 3
+      - minItems: 3
         maxItems: 3
         items:
           description: min, typ, max number of pixels
 
   vfront-porch:
     description: Vertical front porch panel timing
+    $ref: /schemas/types.yaml#/definitions/uint32-array
     oneOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-        maxItems: 1
+      - maxItems: 1
         items:
           description: typical number of lines
-      - $ref: /schemas/types.yaml#/definitions/uint32-array
-        minItems: 3
+      - minItems: 3
         maxItems: 3
         items:
           description: min, typ, max number of lines
 
   vback-porch:
     description: Vertical back porch panel timing
+    $ref: /schemas/types.yaml#/definitions/uint32-array
     oneOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-        maxItems: 1
+      - maxItems: 1
         items:
           description: typical number of lines
-      - $ref: /schemas/types.yaml#/definitions/uint32-array
-        minItems: 3
+      - minItems: 3
         maxItems: 3
         items:
           description: min, typ, max number of lines
 
   vsync-len:
     description: Vertical sync length panel timing
+    $ref: /schemas/types.yaml#/definitions/uint32-array
     oneOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-        maxItems: 1
+      - maxItems: 1
         items:
           description: typical number of lines
-      - $ref: /schemas/types.yaml#/definitions/uint32-array
-        minItems: 3
+      - minItems: 3
         maxItems: 3
         items:
           description: min, typ, max number of lines
index f998a3a5b71f63603ecda5cc4de9a471249f2673..919734c05c0b1b92ab6d5d7e6d937b178bce95b2 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index cb8e735ce3bd2baaebe750a8719ef51526ae26b9..63379fae36366ebae1df9a2e6cb942e3edb9a2b6 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index ba40284ac66f09cdb3c7491fdf7be9e0180f91be..00e325a19cb1df9171432d15d7d27265fdf808ed 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description:
   Samsung Exynos SoC Mixer is responsible for mixing and blending multiple data
index 6f796835ea03a8bd9da87f869054da2248518e88..7c37470bd32973520be68536343159f72ab9ab59 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   DECON (Display and Enhancement Controller) is the Display Controller for the
index 01fccb138ebd98a563e08587c2a77f69cdb4aa44..c5c6239c28d079a0d92742a1c5105069450e1830 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   MIC (Mobile Image Compressor) resides between DECON and MIPI DSI. MIPI DSI is
index afa137d4792281ff0e425435374db19b6ed23db8..320eedc61a5b5b7dcdbdf29decb0a95467f32865 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   DECON (Display and Enhancement Controller) is the Display Controller for the
index 9cf5f120d5168d8e8d61302b8df9c6707e2391c4..c62ea9d228432bcc23e635c5553ff4531598b638 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index f9ffe3d6f9575b85a4b0f44b8ab3c882eaceb5c6..1289605456408167bc9b8f756ad0d4fdbe2ff58b 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77843 MicroUSB and Companion Power Management IC Extcon
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77843 MicroUSB
@@ -25,7 +25,7 @@ properties:
     $ref: /schemas/connector/usb-connector.yaml#
 
   ports:
-    $ref: /schemas/graph.yaml#/properties/port
+    $ref: /schemas/graph.yaml#/properties/ports
     description:
       Any connector to the data bus of this controller should be modelled using
       the OF graph bindings specified
index 4d6bfae0653c457a2469adc51f41f61260611be0..85f8d4764740a3d013d4c04f57802d4622ee2302 100644 (file)
@@ -20,6 +20,7 @@ properties:
           - mediatek,mt8183-mali
           - realtek,rtd1619-mali
           - renesas,r9a07g044-mali
+          - renesas,r9a07g054-mali
           - rockchip,px30-mali
           - rockchip,rk3568-mali
       - const: arm,mali-bifrost # Mali Bifrost GPU model/revision is fully discoverable
@@ -109,7 +110,9 @@ allOf:
       properties:
         compatible:
           contains:
-            const: renesas,r9a07g044-mali
+            enum:
+              - renesas,r9a07g044-mali
+              - renesas,r9a07g054-mali
     then:
       properties:
         interrupts:
index 4b5851c326f7a5027fc9746407608d62430ce44a..b1a4c235376ef3ea8e7d31e8265b156276239686 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: LTC4151 High Voltage I2C Current and Voltage Monitor
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index c42051f8a1914f2e91bd3827e286aab48d74529a..028d6e570131fb60e24bf3aea1961d1c2c9fc970 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Microchip MCP3021 A/D converter
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 4669217d01e16311a943b02003fc60cf1d22c372..80df7182ea28b9de30f912173c48dedcd9739164 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Sensirion SHT15 humidity and temperature sensor
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index d3eff4fac1075e51d04ee1791268e7c057c0480e..c5a889e3e27b9316fbd41a2fe9e022264bd699f8 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: TMP102 temperature sensor
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index eda55bbc172dcb7a3876436dac868c7a87c27741..dcbc6fbc3b48f63bf03039aaa3fef3f1422efb1e 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: TMP108 temperature sensor
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 801ca9ba7d344f112449b5689dd6d0fc7a7f32b2..e7493e25a7d2a2d02c6ac98de4875443087636cc 100644 (file)
@@ -58,9 +58,8 @@ patternProperties:
           The value (two's complement) to be programmed in the channel specific N correction register.
           For remote channels only.
         $ref: /schemas/types.yaml#/definitions/int32
-        items:
-          minimum: -128
-          maximum: 127
+        minimum: -128
+        maximum: 127
 
     required:
       - reg
index 19874e8b73b9b1ce907d43ffcbcbca51ceb79072..3e52a0db6c41b16b049dd4cb5d799ff51f196a17 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung's High Speed I2C controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   The Samsung's High Speed I2C controller is used to interface with I2C devices
index 84051b0129c2bc1739531b8b7b65c28601bbbd14..c26230518957e48c4d9d0e9beb3d00f008e14e55 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S3C/S5P/Exynos SoC I2C Controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index cf711082ad7db0fa5d2c2353e14babf5bf480f9b..666414a9c0defd831e4dc561e267790ddeb61f67 100644 (file)
@@ -98,6 +98,7 @@ allOf:
               - ti,adc121s
               - ti,ads7866
               - ti,ads7868
+    then:
       required:
         - vcc-supply
   # Devices with a vref
index 7c260f209687afbd6b0ba453605f7df239573d6a..92f9472a77ae1e5b46acfd2def91a5ec41bb24c1 100644 (file)
@@ -108,9 +108,7 @@ patternProperties:
           - [1-5]: order 1 to 5.
           For audio purpose it is recommended to use order 3 to 5.
         $ref: /schemas/types.yaml#/definitions/uint32
-        items:
-          minimum: 0
-          maximum: 5
+        maximum: 5
 
       "#io-channel-cells":
         const: 1
@@ -174,7 +172,7 @@ patternProperties:
               contains:
                 const: st,stm32-dfsdm-adc
 
-      - then:
+        then:
           properties:
             st,adc-channels:
               minItems: 1
@@ -206,7 +204,7 @@ patternProperties:
               contains:
                 const: st,stm32-dfsdm-dmic
 
-      - then:
+        then:
           properties:
             st,adc-channels:
               maxItems: 1
@@ -254,7 +252,7 @@ allOf:
           contains:
             const: st,stm32h7-dfsdm
 
-  - then:
+    then:
       patternProperties:
         "^filter@[0-9]+$":
           properties:
@@ -269,7 +267,7 @@ allOf:
           contains:
             const: st,stm32mp1-dfsdm
 
-  - then:
+    then:
       patternProperties:
         "^filter@[0-9]+$":
           properties:
index 0d8fb56f4b093a98187bca02dc74bc6f40f77385..65f86f26947cd5d68d24c16bd2957d08169ea719 100644 (file)
@@ -59,9 +59,9 @@ allOf:
           contains:
             enum:
               - adi,ad5371
-      then:
-        required:
-          - vref2-supply
+    then:
+      required:
+        - vref2-supply
 
 examples:
   - |
diff --git a/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml b/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
new file mode 100644 (file)
index 0000000..b177064
--- /dev/null
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/mediatek,mt6779-keypad.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek's Keypad Controller device tree bindings
+
+maintainers:
+  - Fengping Yu <fengping.yu@mediatek.com>
+
+allOf:
+  - $ref: "/schemas/input/matrix-keymap.yaml#"
+
+description: |
+  Mediatek's Keypad controller is used to interface a SoC with a matrix-type
+  keypad device. The keypad controller supports multiple row and column lines.
+  A key can be placed at each intersection of a unique row and a unique column.
+  The keypad controller can sense a key-press and key-release and report the
+  event using an interrupt to the CPU.
+
+properties:
+  compatible:
+    oneOf:
+      - const: mediatek,mt6779-keypad
+      - items:
+          - enum:
+              - mediatek,mt6873-keypad
+          - const: mediatek,mt6779-keypad
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    items:
+      - const: kpd
+
+  wakeup-source:
+    description: use any event on keypad as wakeup event
+    type: boolean
+
+  debounce-delay-ms:
+    maximum: 256
+    default: 16
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/input/input.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    soc {
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        keyboard@10010000 {
+          compatible = "mediatek,mt6779-keypad";
+          reg = <0 0x10010000 0 0x1000>;
+          interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_FALLING>;
+          clocks = <&clk26m>;
+          clock-names = "kpd";
+        };
+    };
index 535d92885372b0ec8138a2ff1896953656bbb0b9..9d00f2a8e13a4ece0e8573ff6df7b1ec6fc17b39 100644 (file)
@@ -9,7 +9,10 @@ For MT6397/MT6323 MFD bindings see:
 Documentation/devicetree/bindings/mfd/mt6397.txt
 
 Required properties:
-- compatible: "mediatek,mt6397-keys" or "mediatek,mt6323-keys"
+- compatible: Should be one of:
+       - "mediatek,mt6397-keys"
+       - "mediatek,mt6323-keys"
+       - "mediatek,mt6358-keys"
 - linux,keycodes: See Documentation/devicetree/bindings/input/input.yaml
 
 Optional Properties:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml b/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml
new file mode 100644 (file)
index 0000000..e3a2b87
--- /dev/null
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/imagis,ist3038c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Imagis IST30XXC family touchscreen controller bindings
+
+maintainers:
+  - Markuss Broks <markuss.broks@gmail.com>
+
+allOf:
+  - $ref: touchscreen.yaml#
+
+properties:
+  $nodename:
+    pattern: "^touchscreen@[0-9a-f]+$"
+
+  compatible:
+    enum:
+      - imagis,ist3038c
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  vdd-supply:
+    description: Power supply regulator for the chip
+
+  vddio-supply:
+    description: Power supply regulator for the I2C bus
+
+  touchscreen-size-x: true
+  touchscreen-size-y: true
+  touchscreen-fuzz-x: true
+  touchscreen-fuzz-y: true
+  touchscreen-inverted-x: true
+  touchscreen-inverted-y: true
+  touchscreen-swapped-x-y: true
+
+additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - touchscreen-size-x
+  - touchscreen-size-y
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    i2c {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      touchscreen@50 {
+        compatible = "imagis,ist3038c";
+        reg = <0x50>;
+        interrupt-parent = <&gpio>;
+        interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+        vdd-supply = <&ldo1_reg>;
+        vddio-supply = <&ldo2_reg>;
+        touchscreen-size-x = <720>;
+        touchscreen-size-y = <1280>;
+        touchscreen-fuzz-x = <10>;
+        touchscreen-fuzz-y = <10>;
+        touchscreen-inverted-x;
+        touchscreen-inverted-y;
+      };
+    };
+
+...
index 89853b4825133766810527ca9793f8f253e9ea5c..8a676fef8c1d14cd9fbaf2e006575a2be7cb3258 100644 (file)
@@ -93,48 +93,48 @@ allOf:
               - qcom,sdm660-gnoc
               - qcom,sdm660-snoc
 
-      then:
-        properties:
-          clock-names:
-            items:
-              - const: bus
-              - const: bus_a
-
-          clocks:
-            items:
-              - description: Bus Clock
-              - description: Bus A Clock
-
-        # Child node's properties
-        patternProperties:
-          '^interconnect-[a-z0-9]+$':
-            type: object
-            description:
-              snoc-mm is a child of snoc, sharing snoc's register address space.
-
-            properties:
-              compatible:
-                enum:
-                  - qcom,msm8939-snoc-mm
-
-              '#interconnect-cells':
-                const: 1
-
-              clock-names:
-                items:
-                  - const: bus
-                  - const: bus_a
-
-              clocks:
-                items:
-                  - description: Bus Clock
-                  - description: Bus A Clock
-
-            required:
-              - compatible
-              - '#interconnect-cells'
-              - clock-names
-              - clocks
+    then:
+      properties:
+        clock-names:
+          items:
+            - const: bus
+            - const: bus_a
+
+        clocks:
+          items:
+            - description: Bus Clock
+            - description: Bus A Clock
+
+      # Child node's properties
+      patternProperties:
+        '^interconnect-[a-z0-9]+$':
+          type: object
+          description:
+            snoc-mm is a child of snoc, sharing snoc's register address space.
+
+          properties:
+            compatible:
+              enum:
+                - qcom,msm8939-snoc-mm
+
+            '#interconnect-cells':
+              const: 1
+
+            clock-names:
+              items:
+                - const: bus
+                - const: bus_a
+
+            clocks:
+              items:
+                - description: Bus Clock
+                - description: Bus A Clock
+
+          required:
+            - compatible
+            - '#interconnect-cells'
+            - clock-names
+            - clocks
 
   - if:
       properties:
index 372ccbfae7716f590d9a2b57ecbeeec445611a9f..5a583bf3dbc105a010a3ba1f166c961a536919b0 100644 (file)
@@ -7,10 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Marvell MMP/Orion Interrupt controller bindings
 
 maintainers:
-  - Thomas Gleixner <tglx@linutronix.de>
-  - Jason Cooper <jason@lakedaemon.net>
-  - Marc Zyngier <maz@kernel.org>
-  - Rob Herring <robh+dt@kernel.org>
+  - Andrew Lunn <andrew@lunn.ch>
+  - Gregory Clement <gregory.clement@bootlin.com>
 
 allOf:
   - if:
index d631b7589d506403b7cd4441d236026285dd2546..72456a07dac968dc364f47282460c96c0948b980 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC Interrupt Combiner Controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   Samsung's Exynos4 architecture includes a interrupt combiner controller which
index 86a0005cf1569982ad37d07187b798d64e84c13c..e27f57bb52ae8bf0a432b6aacd66fec9e7eb6b88 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX77693 MicroUSB and Companion Power Management IC LEDs
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77693 MicroUSB Integrated
index 36781ee4617f9a8065ed9138166b575b2c33e665..c9d5adbc8c4a2a1da81739519a78dceb14e6ee6a 100644 (file)
@@ -65,7 +65,6 @@ properties:
   iram:
     $ref: /schemas/types.yaml#/definitions/phandle
     description: phandle pointing to the SRAM device node
-    maxItems: 1
 
 required:
   - compatible
index 9b179bb44dfb677ff5a9fa2ecd54913a949c3808..aa55ca65d6ed6d4fa31aa6ccebcc83ad42dfb2f2 100644 (file)
@@ -63,13 +63,11 @@ properties:
 
   mediatek,vpu:
     $ref: /schemas/types.yaml#/definitions/phandle
-    maxItems: 1
     description:
       Describes point to vpu.
 
   mediatek,scp:
     $ref: /schemas/types.yaml#/definitions/phandle
-    maxItems: 1
     description:
       Describes point to scp.
 
index e7b65a91c92c26ffbe7c3d8dc6365f5f7ba01fcb..deb5b657a2d5826522571f718625d0d4737a1099 100644 (file)
@@ -55,13 +55,11 @@ properties:
 
   mediatek,vpu:
     $ref: /schemas/types.yaml#/definitions/phandle
-    maxItems: 1
     description:
       Describes point to vpu.
 
   mediatek,scp:
     $ref: /schemas/types.yaml#/definitions/phandle
-    maxItems: 1
     description:
       Describes point to scp.
 
@@ -106,7 +104,6 @@ allOf:
           enum:
             - mediatek,mt8173-vcodec-enc
             - mediatek,mt8192-vcodec-enc
-            - mediatek,mt8173-vcodec-enc
 
     then:
       properties:
index 7687be0f50aa54e8709bb3aabc4bea94ff0bf3a3..c73bf2352aca6a9d8ffd019211bfab857fcd6e56 100644 (file)
@@ -61,7 +61,6 @@ properties:
 
   mediatek,scp:
     $ref: /schemas/types.yaml#/definitions/phandle
-    maxItems: 1
     description: |
       The node of system control processor (SCP), using
       the remoteproc & rpmsg framework.
index 769f13250047434202810cf603503617c2d27330..08cbdcddfead0403cfdddfb9b1695f4c4e63c5a4 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: DDR PHY Front End (DPFE) for Broadcom STB
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Markus Mayer <mmayer@broadcom.com>
 
 properties:
index f3e62ee07126ab3b760d7340844bd7d7711ef14b..1daa66592477e36dc99c6b02a86ef73ca03edb90 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: LPDDR2 SDRAM AC timing parameters for a given speed-bin
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index dd2141cad866442928d34e8eedd9b1b08a4dd177..9d78f140609b6c116e2b692b0c1b4dac8fa65a89 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: LPDDR2 SDRAM compliant to JEDEC JESD209-2
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 97c3e988af5f4284b4ad44acfc7e987f5bd05c14..5c6512c1e1e37b0ea0a236b0a3790cb36c1c5d18 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: LPDDR3 SDRAM AC timing parameters for a given speed-bin
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index c542f32c39fa98bfc6927a9c6e220e7fa7981b6f..48908a19473c3f349b02d5bb46b97e0773ed4aae 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: LPDDR3 SDRAM compliant to JEDEC JESD209-3
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 14a6bc8f421fccbeb86dd56c08afb942cb71877d..9249624c4fa009673e326cf97bf11e0764b4253a 100644 (file)
@@ -8,7 +8,7 @@ title: Marvell MVEBU SDRAM controller
 
 maintainers:
   - Jan Luebbe <jlu@pengutronix.de>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 9566b3421f0394101370e27a93950da166c2231d..0c511ab906bf897b158b09a9d6916a40882852c9 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Atheros AR7xxx/AR9xxx DDR controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   The DDR controller of the AR7xxx and AR9xxx families provides an interface to
index 2b18cef99511f0e4f64cdc78f59ad7d7918caa5f..514b2c5f885869f5f5fc6c0dc1604b1fc5b9812e 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: H8/300 bus controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Yoshinori Sato <ysato@users.sourceforge.jp>
 
 properties:
index f152243f6b180696073548212c50541763752376..098348b2b815eab70a680e8701c37bea1e75f2c8 100644 (file)
@@ -9,7 +9,7 @@ title: |
   Controller device
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Lukasz Luba <lukasz.luba@arm.com>
 
 description: |
index fb7ae38a9c86672d2f7c3afd707af9ba66d975b9..f46e95704f532a62991dfe2f8ef14b483cdced2b 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Synopsys IntelliDDR Multi Protocol memory controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Manish Narani <manish.narani@xilinx.com>
   - Michal Simek <michal.simek@xilinx.com>
 
@@ -24,9 +24,9 @@ description: |
 properties:
   compatible:
     enum:
+      - snps,ddrc-3.80a
       - xlnx,zynq-ddrc-a05
       - xlnx,zynqmp-ddrc-2.40a
-      - snps,ddrc-3.80a
 
   interrupts:
     maxItems: 1
@@ -43,7 +43,9 @@ allOf:
       properties:
         compatible:
           contains:
-            const: xlnx,zynqmp-ddrc-2.40a
+            enum:
+              - snps,ddrc-3.80a
+              - xlnx,zynqmp-ddrc-2.40a
     then:
       required:
         - interrupts
index 9ed51185ff996b304c506db9113f36cd7759aeb8..382ddab60fbda10be248a587782967e4c2ee8b02 100644 (file)
@@ -8,7 +8,7 @@ title: Texas Instruments da8xx DDR2/mDDR memory controller
 
 maintainers:
   - Bartosz Golaszewski <bgolaszewski@baylibre.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   Documentation:
index 27870b8760a6d492d46156fe82b694d16733600c..52edd1bf549f6d863f5662e77ac906f23f6f515f 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX14577/MAX77836 MicroUSB and Companion Power Management IC
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX14577/MAX77836 MicroUSB
index 859655a789c3bc1c6bccfe3f28611f9e4b8fb0ae..d027aabe453ba5d3a789244d37119768561e1dfd 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77686 Power Management IC
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77686 Power Management
index 906101197e113c4317cb81437ad5ecc17200301d..1b06a77ec79895cc0174b1e17b20625cf1867982 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77693 MicroUSB and Companion Power Management IC
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77693 MicroUSB
index baa1346ac5d5a5fc71196e19e602ab3dd42dfebe..ad2013900b0378c71b8d8dff20d9bb32ae752b2a 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77802 Power Management IC
 
 maintainers:
   - Javier Martinez Canillas <javier@dowhile0.org>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77802 Power Management
index 61a0f9dcb9837de14afa73b6e704381bb4c03785..f30f96bbff43ab85c4b5bd273ae9dcbb9227d733 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX77843 MicroUSB and Companion Power Management IC
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77843 MicroUSB
index bae55c98961c52878cdd5afa8029d7a08888fde0..f7bb67d10eff3618fe9542ff096402863a7b67bf 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC Low Power Audio Subsystem (LPASS)
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
 properties:
index 017befdf8adb5a7cd1b26490a68b9b5c983d6600..055dfc337c2f943ab98ec173caec202cf7fbe983 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPA01 Power Management IC
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index 771b3f16da965caf04175d4ebf5627b356053a03..5ff6546c72b79a54723d00b6de093df2d22fca31 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPS11/13/14/15 and S2MPU02 Power Management IC
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index 5531718abdf07d49fd7096b88a20415b5aeda8f0..10c7b408f33aa3d815578fcf1134ed5a9a94eec6 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S5M8767 Power Management IC
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index ce64b34983785a9217335220fc9408e46f997647..f3f4d5b02744851b1d2cb18965c86af78e11d052 100644 (file)
@@ -197,6 +197,8 @@ allOf:
               - nvidia,tegra30-sdhci
               - nvidia,tegra114-sdhci
               - nvidia,tegra124-sdhci
+    then:
+      properties:
         clocks:
           items:
             - description: module clock
index 8756060895a8e54a430debcbba1dffafb20f2a0a..99ee4b5b9346c0c710f013e66951d76ea8bef3ba 100644 (file)
@@ -27,32 +27,25 @@ description:
   The realtek-mdio driver is an MDIO driver and it must be inserted inside
   an MDIO node.
 
+  The compatible string is only used to identify which (silicon) family the
+  switch belongs to. Roughly speaking, a family is any set of Realtek switches
+  whose chip identification register(s) have a common location and semantics.
+  The different models in a given family can be automatically disambiguated by
+  parsing the chip identification register(s) according to the given family,
+  avoiding the need for a unique compatible string for each model.
+
 properties:
   compatible:
     enum:
       - realtek,rtl8365mb
-      - realtek,rtl8366
       - realtek,rtl8366rb
-      - realtek,rtl8366s
-      - realtek,rtl8367
-      - realtek,rtl8367b
-      - realtek,rtl8367rb
-      - realtek,rtl8367s
-      - realtek,rtl8368s
-      - realtek,rtl8369
-      - realtek,rtl8370
     description: |
-      realtek,rtl8365mb: 4+1 ports
-      realtek,rtl8366: 5+1 ports
-      realtek,rtl8366rb: 5+1 ports
-      realtek,rtl8366s: 5+1 ports
-      realtek,rtl8367:
-      realtek,rtl8367b:
-      realtek,rtl8367rb: 5+2 ports
-      realtek,rtl8367s: 5+2 ports
-      realtek,rtl8368s: 8 ports
-      realtek,rtl8369: 8+1 ports
-      realtek,rtl8370: 8+2 ports
+      realtek,rtl8365mb:
+        Use with models RTL8363NB, RTL8363NB-VB, RTL8363SC, RTL8363SC-VB,
+        RTL8364NB, RTL8364NB-VB, RTL8365MB, RTL8366SC, RTL8367RB-VB, RTL8367S,
+        RTL8367SB, RTL8370MB, RTL8310SR
+      realtek,rtl8366rb:
+        Use with models RTL8366RB, RTL8366S
 
   mdc-gpios:
     description: GPIO line for the MDC clock line.
@@ -335,7 +328,7 @@ examples:
             #size-cells = <0>;
 
             switch@29 {
-                    compatible = "realtek,rtl8367s";
+                    compatible = "realtek,rtl8365mb";
                     reg = <29>;
 
                     reset-gpios = <&gpio2 20 GPIO_ACTIVE_LOW>;
index 15a45db3899a6eb2a0ff087e0bea0fdbbd55c570..1bcaf6ba822cbbad131a7dc1c2f86281773d2edf 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Marvell International Ltd. NCI NFC controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 7465aea2e1c040ee156c68ed251886651658e0cd..e381a3c148368b799f167f50a6c2a8c989b22045 100644 (file)
@@ -8,7 +8,7 @@ title: NXP Semiconductors NCI NFC controller
 
 maintainers:
   - Charles Gorand <charles.gorand@effinnov.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index d8ba5a18db98d534d6a20bf8bc8e1231548c1014..0509e0166345a1fe67d9127297612b1afd94a222 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: NXP Semiconductors PN532 NFC controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index d520414de4636a858f9dc51bbdcd7041ee8eeca7..18b3a7d819df5ba1ebc646d12206dd71c5329d96 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: NXP Semiconductors PN544 NFC Controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index a6a1bc788d29a4ff924da506c8f437c908ee538f..ef1155038a2fcdbe529fc9e328e24ffe688a12c6 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: STMicroelectronics ST NCI NFC controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 4356eacde8aa8b57bb754f115697da1387c0c6e0..8a7274357b46f19309d333b97cb80fc518c5f2a0 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: STMicroelectronics SAS ST21NFCA NFC controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index d3bca376039ef34b1b898f8d9dd4f94c6b4874c7..963d9531a856a25b187b92cc42f9f72df2957fe9 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: STMicroelectronics ST95HF NFC controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 40da2ac989780cfd87abfc8bab6c0cfaaf9bace1..404c8df993640e1551b3287e8e35a568b2e546c2 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments TRF7970A RFID/NFC/15693 Transceiver
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Mark Greer <mgreer@animalcreek.com>
 
 properties:
index 2d5248f5b91944b8b0382c7cc0c7af0e4c0f0de6..36c85eb3dc0de6f7fb4874f0e35da858f8984977 100644 (file)
@@ -53,20 +53,18 @@ properties:
         - allwinner,sun8i-r40-gmac
         - allwinner,sun8i-v3s-emac
         - allwinner,sun50i-a64-emac
-        - loongson,ls2k-dwmac
-        - loongson,ls7a-dwmac
         - amlogic,meson6-dwmac
         - amlogic,meson8b-dwmac
         - amlogic,meson8m2-dwmac
         - amlogic,meson-gxbb-dwmac
         - amlogic,meson-axg-dwmac
-        - loongson,ls2k-dwmac
-        - loongson,ls7a-dwmac
         - ingenic,jz4775-mac
         - ingenic,x1000-mac
         - ingenic,x1600-mac
         - ingenic,x1830-mac
         - ingenic,x2000-mac
+        - loongson,ls2k-dwmac
+        - loongson,ls7a-dwmac
         - rockchip,px30-gmac
         - rockchip,rk3128-gmac
         - rockchip,rk3228-gmac
index e602761f7b149e7d434151dced31586144650142..b0ebcef6801ce50f728b24e70251125d8e1d373c 100644 (file)
@@ -13,9 +13,6 @@ description: |
   This describes the devicetree bindings for AVE ethernet controller
   implemented on Socionext UniPhier SoCs.
 
-allOf:
-  - $ref: ethernet-controller.yaml#
-
 properties:
   compatible:
     enum:
@@ -44,25 +41,13 @@ properties:
     minItems: 1
     maxItems: 4
 
-  clock-names:
-    oneOf:
-      - items:          # for Pro4
-          - const: gio
-          - const: ether
-          - const: ether-gb
-          - const: ether-phy
-      - const: ether    # for others
+  clock-names: true
 
   resets:
     minItems: 1
     maxItems: 2
 
-  reset-names:
-    oneOf:
-      - items:          # for Pro4
-          - const: gio
-          - const: ether
-      - const: ether    # for others
+  reset-names: true
 
   socionext,syscon-phy-mode:
     $ref: /schemas/types.yaml#/definitions/phandle-array
@@ -78,6 +63,42 @@ properties:
     $ref: mdio.yaml#
     unevaluatedProperties: false
 
+allOf:
+  - $ref: ethernet-controller.yaml#
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: socionext,uniphier-pro4-ave4
+    then:
+      properties:
+        clocks:
+          minItems: 4
+          maxItems: 4
+        clock-names:
+          items:
+            - const: gio
+            - const: ether
+            - const: ether-gb
+            - const: ether-phy
+        resets:
+          minItems: 2
+          maxItems: 2
+        reset-names:
+          items:
+            - const: gio
+            - const: ether
+    else:
+      properties:
+        clocks:
+          maxItems: 1
+        clock-names:
+          const: ether
+        resets:
+          maxItems: 1
+        reset-names:
+          const: ether
+
 required:
   - compatible
   - reg
@@ -90,7 +111,7 @@ required:
   - reset-names
   - mdio
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index dbfca5ee91399466bba2b184c109da4acdf27574..6f44f9516c3647dedcc684ca45c338be366d7f87 100644 (file)
@@ -56,6 +56,7 @@ if:
     compatible:
       contains:
         const: ti,davinci_mdio
+then:
   required:
     - bus_freq
 
index dfde0eaf66e125590daf4f2b912b936c28baa835..d61585c96e319ef309f087b69111e82d77b7cb5b 100644 (file)
@@ -275,17 +275,17 @@ allOf:
           - nvidia,hssquelch-level
           - nvidia,hsdiscon-level
 
-        else:
-          properties:
-            clocks:
-              maxItems: 4
+      else:
+        properties:
+          clocks:
+            maxItems: 4
 
-            clock-names:
-              items:
-                - const: reg
-                - const: pll_u
-                - const: timer
-                - const: utmi-pads
+          clock-names:
+            items:
+              - const: reg
+              - const: pll_u
+              - const: timer
+              - const: utmi-pads
 
   - if:
       properties:
index e23e5590eaa3d0e60caa59a75063e86ec678b650..0655e485b2604a9bb81f7b5cd228f9caa1ba8a1e 100644 (file)
@@ -14,24 +14,24 @@ if:
     compatible:
       contains:
         const: qcom,usb-hs-phy-apq8064
-  then:
-    properties:
-      resets:
-        maxItems: 1
+then:
+  properties:
+    resets:
+      maxItems: 1
 
-      reset-names:
-        const: por
+    reset-names:
+      const: por
 
-  else:
-    properties:
-      resets:
-        minItems: 2
-        maxItems: 2
+else:
+  properties:
+    resets:
+      minItems: 2
+      maxItems: 2
 
-      reset-names:
-        items:
-          - const: phy
-          - const: por
+    reset-names:
+      items:
+        - const: phy
+        - const: por
 
 properties:
   compatible:
@@ -92,6 +92,8 @@ additionalProperties: false
 examples:
   - |
     otg: usb-controller {
+      #reset-cells = <1>;
+
       ulpi {
         phy {
           compatible = "qcom,usb-hs-phy-msm8974", "qcom,usb-hs-phy";
index 838c6d480ce62a93a8e3215de5b5ef85034c0b90..b03b2f00cc5b7e7aa6753a149dec2680f21bdd72 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC DisplayPort PHY
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Marek Szyprowski <m.szyprowski@samsung.com>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
index c61574e10b2a7ab85bbed9072c0b85a8c0d3bb01..3e5f035de2e9c0b88892f4433ff0c5bd0e2c600a 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 62b39bb465858d784cb94a11af557550d5676a21..8751e559484fae38b494588d959e7bb286405738 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos5250 SoC SATA PHY
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Marek Szyprowski <m.szyprowski@samsung.com>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
index 54aa056b224d15e15b9be2554ef55071aa96f8f9..415440aaad89160bff8bd2480e6d17071b5665a8 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S5P/Exynos SoC MIPI CSIS/DSIM DPHY
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Marek Szyprowski <m.szyprowski@samsung.com>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
index 056e270a4e88e52b10453a51d1ba3a827d06f65e..d9f22a801cbf7d8ab0293affb3b1cce698563147 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S5P/Exynos SoC USB 2.0 PHY
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Marek Szyprowski <m.szyprowski@samsung.com>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
index f83f0f8135b940e420638c0d790a073cd7c194f4..5ba55f9f20cc58cf19434d3d4d04406568d5d291 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC USB 3.0 DRD PHY USB 2.0 PHY
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Marek Szyprowski <m.szyprowski@samsung.com>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
index 8a90d82737676c8101259bb7335e5a69f8b4112a..6bd42e43cdabe3bfcd0be176501f4ef4f32ae574 100644 (file)
@@ -48,13 +48,12 @@ properties:
               Name of one pin group to configure.
             enum: [ aif1, aif2, aif3, aif4, mif1, mif2, mif3, pdmspk1,
                     pdmspk2, dmic4, dmic5, dmic6, gpio1, gpio2, gpio3,
-                    gpio4, gpio5, gpio6, gpio7, gpio7, gpio8, gpio9,
+                    gpio4, gpio5, gpio6, gpio7, gpio8, gpio9,
                     gpio10, gpio11, gpio12, gpio13, gpio14, gpio15,
-                    gpio16, gpio17, gpio17, gpio18, gpio19, gpio20,
-                    gpio21, gpio22, gpio23, gpio24, gpio25, gpio26,
-                    gpio27, gpio27, gpio28, gpio29, gpio30, gpio31,
-                    gpio32, gpio33, gpio34, gpio35, gpio36, gpio37,
-                    gpio37, gpio38, gpio39 ]
+                    gpio16, gpio17, gpio18, gpio19, gpio20, gpio21,
+                    gpio22, gpio23, gpio24, gpio25, gpio26, gpio27,
+                    gpio28, gpio29, gpio30, gpio31, gpio32, gpio33,
+                    gpio34, gpio35, gpio36, gpio37, gpio38, gpio39 ]
 
           function:
             description:
index f73348c5474838da04afd3e5217616b651ab5485..8cf3c47ab86b2996c71471af740725b0d41ba6f9 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S3C/S5P/Exynos SoC pin controller - gpio bank
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index c71939ac8b636ed851abd8c5b7c6d014a1644c4c..9869d4dceddbbb71e572bdb65161efb0b2866209 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S3C/S5P/Exynos SoC pin controller - pins configuration
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index a822f70f5702e61d69bb6681a7fb89f73db685d3..1de91a51234df4908625264615788d0d316f76fc 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S3C/S5P/Exynos SoC pin controller - wake-up interrupt controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 989e48c051cff44d4c900cb2830fa480ce339c4f..3a65c66ca71d226691befcb2a7b0f5fbf2e8726c 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S3C/S5P/Exynos SoC pin controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 4d293b2b2f8466da1ceecd049c0b0e3070cdf8cd..d77fc88050c8afb0d4a9fb858f374b7e7e9786fb 100644 (file)
@@ -36,7 +36,8 @@ properties:
   cpus:
     $ref: /schemas/types.yaml#/definitions/phandle-array
     items:
-      maxItems: 1
+      minItems: 1
+      maxItems: 4
     description: |
       Array of phandles pointing to CPU cores, which should match the order of
       CPU cores used by the WUPCR and PSTR registers in the Advanced Power
index f8461f06e6f430a9c8b2f62c0fa7c4408ca3466b..118cf484cc69f8095a65cb408fdd3c0562093d85 100644 (file)
@@ -16,7 +16,6 @@ allOf:
 properties:
   compatible:
     enum:
-      - ti,bq24150
       - ti,bq24150
       - ti,bq24150a
       - ti,bq24151
index 3978b48299de188bb2704d83ee5c0acd7e6ce006..4d3a1d09036f6a359e10bf2aa5c854998b0eae49 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX14577/MAX77836 MicroUSB and Companion Power Management IC Charger
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX14577/MAX77836 MicroUSB
index a21dc1a8890ffb65ac4176a579193447182ef10f..f5fd53debbc8e0161628df1e98f63bae91da6058 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX77693 MicroUSB and Companion Power Management IC Charger
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77693 MicroUSB Integrated
index 9b131c6facbc0032b4a994b9f0f374397380c179..84eeaef179a5c47bb40f817cc7ac29d6b9a17dea 100644 (file)
@@ -18,23 +18,23 @@ description:
 
 allOf:
   - $ref: "regulator.yaml#"
-
-if:
-  properties:
-    compatible:
-      contains:
-        const: regulator-fixed-clock
-  required:
-    - clocks
-else:
-  if:
-    properties:
-      compatible:
-        contains:
-          const: regulator-fixed-domain
-    required:
-      - power-domains
-      - required-opps
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: regulator-fixed-clock
+    then:
+      required:
+        - clocks
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: regulator-fixed-domain
+    then:
+      required:
+        - power-domains
+        - required-opps
 
 properties:
   compatible:
index 16f01886a60143ae3b5ecd85f5a535d0ccd00b06..285dc7122977e3794b7bcf7106e3d2cc7640995d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX14577/MAX77836 MicroUSB and Companion Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX14577/MAX77836 MicroUSB
index bb64b679f765a726a9748169629113a897ef961b..0e7cd4b3ace019dd0c7f3c2b68058872c4feba1c 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77686 Power Management IC regulators
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77686 Power Management
index 20d8559bdc2b8cda1f13efec578f2bd26b7fa63d..945a539749e8974fdda46c62e304e1c8c7376fcd 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77693 MicroUSB and Companion Power Management IC regulators
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77693 MicroUSB Integrated
index f2b4dd15a0f366892b77c1831f3468e15cf219b6..236348c4710c9d9d3122ee843d3726ae1fef96e6 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77802 Power Management IC regulators
 
 maintainers:
   - Javier Martinez Canillas <javier@dowhile0.org>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77802 Power Management
index a963025e96c12ef586ad3e074f5e911d978febe8..9695e72428829f95dd5c0c6187b5aab91e4161fa 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX77843 MicroUSB and Companion Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77843 MicroUSB Integrated
index e4e8c58f6046feefcf80f0e9e7b5701c2a63a19f..3ff0d7d980e9775555e3e368e9f9c3bfa37a1ff6 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX8952 voltage regulator
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 allOf:
   - $ref: regulator.yaml#
index 5898dcf10f0672bc574ebee8e1d08318017b5ec6..b92eef68c19f6925b8dda28802aedbc8fa9897c3 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX8973/MAX77621 voltage regulator
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 allOf:
   - $ref: regulator.yaml#
index d5a44ca3df0400535155c7e794af04f12de76b89..4321f061a7f6249c407db73c1091d1a7b13cffc0 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX8997 Power Management IC
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   The Maxim MAX8997 is a Power Management IC which includes voltage and current
index 0627dec513da0d1372a1a83e9973b23bf952986c..0f9eb317ba9a5d3258616af8351af8cea64829fa 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPA01 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index e3b780715f446d36ffc7a630f1886a0d872cc91b..f1c50dcd0b04915b92a30e80dbe208b1330fa21d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPS11 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index 579d77aefc3f0c0e9fcb56542650718746a2493e..53b105a4ead1a05a604d42b5231b8a986b597358 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPS13 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index fdea290b3e949b9df9017472227ba1e65643ddac..01f9d4e236e94901e9eb6e7f2d731d00c06cb77a 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPS14 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index b3a883c94628933b7e5c6e453eb47394ef64e29f..9576c2df45a61ba714e64dbc7d29b9cee7eece5b 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPS15 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index 0ded6953e3b67b708077c27ef2a9d53700a1a6e7..39b652c3c3c486a4703c1feb183b35b17b03c968 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPU02 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index 3c1617b66861ecb7b14fa79db3e2086904532cbd..172631ca3c25cd84fba0d5297f3c03d74061457b 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S5M8767 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index 2424de733ee43235ae0c15879d334a7f61f3ccd1..d99a729d27107655e931cfccd5642f0cee2ab2de 100644 (file)
@@ -104,8 +104,7 @@ properties:
   qcom,smem-state-names:
     $ref: /schemas/types.yaml#/definitions/string
     description: The names of the state bits used for SMP2P output
-    items:
-      - const: stop
+    const: stop
 
   glink-edge:
     type: object
@@ -130,7 +129,6 @@ properties:
       qcom,remote-pid:
         $ref: /schemas/types.yaml#/definitions/uint32
         description: ID of the shared memory used by GLINK for communication with WPSS
-        maxItems: 1
 
     required:
       - interrupts
index b0c41ab1a746a60bfeef3060c54c887e28107164..cdfcf32c53fa9371833cd77dbc16e11f2a789d2e 100644 (file)
@@ -24,6 +24,11 @@ properties:
           - const: hisilicon,hi3670-reset
           - const: hisilicon,hi3660-reset
 
+  hisi,rst-syscon:
+    deprecated: true
+    description: phandle of the reset's syscon, use hisilicon,rst-syscon instead
+    $ref: /schemas/types.yaml#/definitions/phandle
+
   hisilicon,rst-syscon:
     description: phandle of the reset's syscon.
     $ref: /schemas/types.yaml#/definitions/phandle
index 377a7d242323d01fd1e1963c067c2d515d331307..6566804ec5674354cd1f5330a8031dcb658b5cc7 100644 (file)
@@ -55,6 +55,9 @@ properties:
   "#reset-cells":
     const: 1
 
+  resets:
+    maxItems: 1
+
 additionalProperties: false
 
 required:
index aa5fb64d57eb4f46692ac45c70d22bf4ee7b8459..d632ac76532e9b5a7b4975b070845a0a619b427b 100644 (file)
@@ -99,6 +99,14 @@ properties:
       - compatible
       - interrupt-controller
 
+  cpu-idle-states:
+    $ref: '/schemas/types.yaml#/definitions/phandle-array'
+    items:
+      maxItems: 1
+    description: |
+      List of phandles to idle state nodes supported
+      by this hart (see ./idle-states.yaml).
+
 required:
   - riscv,isa
   - interrupt-controller
index a50c34d5d199a789af1b295858e2c40c1075a58b..765d9f9edd6ef437925d0ff25d143e3c4f84269c 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC True Random Number Generator
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Łukasz Stelmach <l.stelmach@samsung.com>
 
 properties:
index 84bf518a55493fe23131296f89ebd7e9cb2e92b2..4754174e9849b78373a68f40057be73ea44c0366 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: TimerIO Random Number Generator
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index beeb90e557278d45f57860783f3c27d020d9d223..0b767fec39d87f48665fde70f1461afb9911faa7 100644 (file)
@@ -16,16 +16,22 @@ properties:
 
   compatible:
     oneOf:
-      - const: allwinner,sun6i-a31-rtc
-      - const: allwinner,sun8i-a23-rtc
-      - const: allwinner,sun8i-h3-rtc
-      - const: allwinner,sun8i-r40-rtc
-      - const: allwinner,sun8i-v3-rtc
-      - const: allwinner,sun50i-h5-rtc
+      - enum:
+          - allwinner,sun6i-a31-rtc
+          - allwinner,sun8i-a23-rtc
+          - allwinner,sun8i-h3-rtc
+          - allwinner,sun8i-r40-rtc
+          - allwinner,sun8i-v3-rtc
+          - allwinner,sun50i-h5-rtc
+          - allwinner,sun50i-h6-rtc
+          - allwinner,sun50i-h616-rtc
+          - allwinner,sun50i-r329-rtc
       - items:
           - const: allwinner,sun50i-a64-rtc
           - const: allwinner,sun8i-h3-rtc
-      - const: allwinner,sun50i-h6-rtc
+      - items:
+          - const: allwinner,sun20i-d1-rtc
+          - const: allwinner,sun50i-r329-rtc
 
   reg:
     maxItems: 1
@@ -37,7 +43,12 @@ properties:
       - description: RTC Alarm 1
 
   clocks:
-    maxItems: 1
+    minItems: 1
+    maxItems: 4
+
+  clock-names:
+    minItems: 1
+    maxItems: 4
 
   clock-output-names:
     minItems: 1
@@ -85,6 +96,7 @@ allOf:
             enum:
               - allwinner,sun8i-h3-rtc
               - allwinner,sun50i-h5-rtc
+              - allwinner,sun50i-h6-rtc
 
     then:
       properties:
@@ -96,19 +108,68 @@ allOf:
       properties:
         compatible:
           contains:
-            const: allwinner,sun50i-h6-rtc
+            const: allwinner,sun50i-h616-rtc
 
     then:
       properties:
-        clock-output-names:
+        clocks:
           minItems: 3
           maxItems: 3
+          items:
+            - description: Bus clock for register access
+            - description: 24 MHz oscillator
+            - description: 32 kHz clock from the CCU
+
+        clock-names:
+          minItems: 3
+          maxItems: 3
+          items:
+            - const: bus
+            - const: hosc
+            - const: pll-32k
+
+      required:
+        - clocks
+        - clock-names
 
   - if:
       properties:
         compatible:
           contains:
-            const: allwinner,sun8i-r40-rtc
+            const: allwinner,sun50i-r329-rtc
+
+    then:
+      properties:
+        clocks:
+          minItems: 3
+          maxItems: 4
+          items:
+            - description: Bus clock for register access
+            - description: 24 MHz oscillator
+            - description: AHB parent for internal SPI clock
+            - description: External 32768 Hz oscillator
+
+        clock-names:
+          minItems: 3
+          maxItems: 4
+          items:
+            - const: bus
+            - const: hosc
+            - const: ahb
+            - const: ext-osc32k
+
+      required:
+        - clocks
+        - clock-names
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - allwinner,sun8i-r40-rtc
+              - allwinner,sun50i-h616-rtc
+              - allwinner,sun50i-r329-rtc
 
     then:
       properties:
@@ -127,7 +188,6 @@ required:
   - compatible
   - reg
   - interrupts
-  - clock-output-names
 
 additionalProperties: false
 
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91sam9-rtc.txt b/Documentation/devicetree/bindings/rtc/atmel,at91sam9-rtc.txt
deleted file mode 100644 (file)
index 3f0e2a5..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-Atmel AT91SAM9260 Real Time Timer
-
-Required properties:
-- compatible: should be one of the following:
-       - "atmel,at91sam9260-rtt"
-       - "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt"
-- reg: should encode the memory region of the RTT controller
-- interrupts: rtt alarm/event interrupt
-- clocks: should contain the 32 KHz slow clk that will drive the RTT block.
-- atmel,rtt-rtc-time-reg: should encode the GPBR register used to store
-       the time base when the RTT is used as an RTC.
-       The first cell should point to the GPBR node and the second one
-       encode the offset within the GPBR block (or in other words, the
-       GPBR register used to store the time base).
-
-
-Example:
-
-rtt@fffffd20 {
-       compatible = "atmel,at91sam9260-rtt";
-       reg = <0xfffffd20 0x10>;
-       interrupts = <1 4 7>;
-       clocks = <&clk32k>;
-       atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
-};
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91sam9260-rtt.yaml b/Documentation/devicetree/bindings/rtc/atmel,at91sam9260-rtt.yaml
new file mode 100644 (file)
index 0000000..0ef1b7f
--- /dev/null
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2022 Microchip Technology, Inc. and its subsidiaries
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/atmel,at91sam9260-rtt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel AT91 RTT Device Tree Bindings
+
+allOf:
+  - $ref: "rtc.yaml#"
+
+maintainers:
+  - Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+          - const: atmel,at91sam9260-rtt
+      - items:
+          - const: microchip,sam9x60-rtt
+          - const: atmel,at91sam9260-rtt
+      - items:
+          - const: microchip,sama7g5-rtt
+          - const: microchip,sam9x60-rtt
+          - const: atmel,at91sam9260-rtt
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  atmel,rtt-rtc-time-reg:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    items:
+      - items:
+          - description: Phandle to the GPBR node.
+          - description: Offset within the GPBR block.
+    description:
+      Should encode the GPBR register used to store the time base when the
+      RTT is used as an RTC. The first cell should point to the GPBR node
+      and the second one encodes the offset within the GPBR block (or in
+      other words, the GPBR register used to store the time base).
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - atmel,rtt-rtc-time-reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    rtc@fffffd20 {
+        compatible = "atmel,at91sam9260-rtt";
+        reg = <0xfffffd20 0x10>;
+        interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+        clocks = <&clk32k>;
+        atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
+    };
index a98ed66d092e8960ed1c469c468251b2304a63cf..0cabb773c3976afabec450797e8202fd40c6761e 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung's Exynos USI (Universal Serial Interface) binding
 
 maintainers:
   - Sam Protsenko <semen.protsenko@linaro.org>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   USI IP-core provides selectable serial protocol (UART, SPI or High-Speed I2C).
index cea2bf3544f0ae5fa26dbc2ed5be0424553c8b2c..9bc4585bb6e519329498f9de51c65c8701782a1d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Insignal Arndale boards audio complex
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
 properties:
index cb51af90435e7b350d21664d9d03415029531287..ac151d3c1d779b0bc17f161fd272ea012c7e6993 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung SMDK5250 audio complex with WM8994 codec
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
 properties:
index 0c3b3302b842412ecfee100b1b4ea2efdf2697c7..51a83d3c72742e6e57e9474c53411b00d56ee8cb 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Google Snow audio complex with MAX9809x codec
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
 properties:
index 74712d6f3ef490624abc1ca5a42644a16a0b9599..491e08019c040c650647c7c38e382ff190623af2 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos5433 TM2(E) audio complex with WM5110 codec
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
 properties:
index b3dbcba33e41f67cd111f3ec587a5941a7b81dd2..fe2e15504ebc41119feefe1d57ccc743e7a78f71 100644 (file)
@@ -136,8 +136,7 @@ allOf:
         compatible:
           contains:
             const: st,stm32f4-sai
-
-  - then:
+    then:
       properties:
         clocks:
           items:
@@ -148,8 +147,7 @@ allOf:
           items:
             - const: x8k
             - const: x11k
-
-  - else:
+    else:
       properties:
         clocks:
           items:
index b104899205f6d2da17fd86325cb0fec4312e22d8..5de710adfa63cb12311aff4209b40a0f49d39d99 100644 (file)
@@ -124,7 +124,6 @@ properties:
     description: |
       Override the default TX fifo size.  Unit is words.  Ignored if 0.
     $ref: /schemas/types.yaml#/definitions/uint32
-    maxItems: 1
     default: 64
 
   renesas,rx-fifo-size:
@@ -132,7 +131,6 @@ properties:
     description: |
       Override the default RX fifo size.  Unit is words.  Ignored if 0.
     $ref: /schemas/types.yaml#/definitions/uint32
-    maxItems: 1
     default: 64
 
 required:
index f0db3fb3d68804f80e850137446af20ba036d919..25b1b6c12d4de7890119744088581d2c53612e88 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Peripheral-specific properties for Samsung S3C/S5P/Exynos SoC SPI controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description:
   See spi-peripheral-props.yaml for more info.
index bf9a76d931d243c6e76f65f0dc1f7c9dba9ad6d9..a50f24f9359de1b94e536b9725a1b47fd38bd836 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S3C/S5P/Exynos SoC SPI controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description:
   All the SPI controller nodes should be represented in the aliases node using
index 668a9a41a7754c95c280ca9b32fe20e2d4ea0eb7..993430be355b47318105cceb6a274ce05f250d45 100644 (file)
@@ -136,14 +136,14 @@ required:
   - reg
 
 if:
-  properties:
-    compatible:
-      contains:
-        enum:
-          - qcom,rpm-msg-ram
-          - rockchip,rk3288-pmu-sram
-
-else:
+  not:
+    properties:
+      compatible:
+        contains:
+          enum:
+            - qcom,rpm-msg-ram
+            - rockchip,rk3288-pmu-sram
+then:
   required:
     - "#address-cells"
     - "#size-cells"
index 17129f75d9624935b9d3a5899c62ee47671c88f2..1344df708e2d290accd1fd52bf011a01ef03fd62 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC Thermal Management Unit (TMU)
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   For multi-instance tmu each instance should have an alias correctly numbered
index 22b91a27d776215b04ccbf7d48d4fb9a6a074c58..6b9a3bcb3926f5195b34184a8c5515c9ee242d8a 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC USB 3.0 DWC3 Controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index fbf07d6e707afd6b788c3de2bd789438ac87ffaa..340dff8d19c3691484a4e724f46bf83ce7245b56 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC USB 2.0 EHCI/OHCI Controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 8fe2d934f949acf343d350bb214342572567f7ff..01430973ecec39477543965c1606c1d1e2042313 100644 (file)
@@ -560,6 +560,8 @@ patternProperties:
     description: Ingenieurburo Fur Ic-Technologie (I/F/I)
   "^ilitek,.*":
     description: ILI Technology Corporation (ILITEK)
+  "^imagis,.*":
+    description: Imagis Technologies Co., Ltd.
   "^img,.*":
     description: Imagination Technologies Ltd.
   "^imi,.*":
index 91a98ccd4226f5051a91ca6fdcde6e0ee56ca2d7..d060438e1402d502ab0cf068e2d33afe6584572a 100644 (file)
@@ -55,6 +55,11 @@ properties:
               - renesas,r8a779a0-wdt     # R-Car V3U
           - const: renesas,rcar-gen3-wdt # R-Car Gen3 and RZ/G2
 
+      - items:
+          - enum:
+              - renesas,r8a779f0-wdt     # R-Car S4-8
+          - const: renesas,rcar-gen4-wdt # R-Car Gen4
+
   reg:
     maxItems: 1
 
index 55006678394a2b8333308a3ed69ce6aec5e02a98..36a76cbe90954f0e9c9985e981f564e9eda2de58 100644 (file)
@@ -185,6 +185,12 @@ DMA Fence Chain
 .. kernel-doc:: include/linux/dma-fence-chain.h
    :internal:
 
+DMA Fence unwrap
+~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: include/linux/dma-fence-unwrap.h
+   :internal:
+
 DMA Fence uABI/Sync File
 ~~~~~~~~~~~~~~~~~~~~~~~~
 
index be793c49a772dc221052006a2391c5e858806dae..d7507becf67478da56deb15c60bf306e1a40e8df 100644 (file)
@@ -73,7 +73,7 @@ busy.
 If successful, the cache backend can then start setting up the cache.  In the
 event that the initialisation fails, the cache backend should call::
 
-       void fscache_relinquish_cookie(struct fscache_cache *cache);
+       void fscache_relinquish_cache(struct fscache_cache *cache);
 
 to reset and discard the cookie.
 
@@ -110,9 +110,9 @@ to withdraw them, calling::
 
 on the cookie that each object belongs to.  This schedules the specified cookie
 for withdrawal.  This gets offloaded to a workqueue.  The cache backend can
-test for completion by calling::
+wait for completion by calling::
 
-       bool fscache_are_objects_withdrawn(struct fscache_cookie *cache);
+       void fscache_wait_for_objects(struct fscache_cache *cache);
 
 Once all the cookies are withdrawn, a cache backend can withdraw all the
 volumes, calling::
@@ -125,7 +125,7 @@ outstanding accesses on the volume to complete before returning.
 When the cache is completely withdrawn, fscache should be notified by
 calling::
 
-       void fscache_cache_relinquish(struct fscache_cache *cache);
+       void fscache_relinquish_cache(struct fscache_cache *cache);
 
 to clear fields in the cookie and discard the caller's ref on it.
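
Taken together, these hunks describe the shutdown half of a cache backend's
lifecycle. A minimal sketch, assuming invented mycache_* helpers for the
backend's own iteration; only the fscache_* calls come from the API quoted
above, and fscache_withdraw_volume() is assumed to be the per-volume call
elided by the last hunk::

        static void mycache_stop(struct fscache_cache *cache)
        {
                /* Schedule withdrawal of each object's cookie; the actual
                 * withdrawal is offloaded to a workqueue. */
                mycache_withdraw_all_cookies(cache);   /* fscache_withdraw_cookie() each */

                /* Wait for the offloaded withdrawals to complete. */
                fscache_wait_for_objects(cache);

                /* Withdraw the volumes, then hand the cache record back. */
                mycache_withdraw_all_volumes(cache);   /* fscache_withdraw_volume() each */
                fscache_relinquish_cache(cache);
        }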
 
index 5066113acad59aa0ba46de39da6e288066e8020a..7308d76a29dc78a5266fec862ff8d5536892120a 100644 (file)
@@ -404,22 +404,21 @@ schedule a write of that region::
 And if an error occurs before that point is reached, the marks can be removed
 by calling::
 
-       void fscache_clear_page_bits(struct fscache_cookie *cookie,
-                                    struct address_space *mapping,
+       void fscache_clear_page_bits(struct address_space *mapping,
                                     loff_t start, size_t len,
                                     bool caching)
 
-In both of these functions, the cookie representing the cache object to be
-written to and a pointer to the mapping to which the source pages are attached
-are passed in; start and len indicate the size of the region that's going to be
-written (it doesn't have to align to page boundaries necessarily, but it does
-have to align to DIO boundaries on the backing filesystem).  The caching
-parameter indicates if caching should be skipped, and if false, the functions
-do nothing.
-
-The write function takes some additional parameters: i_size indicates the size
-of the netfs file and term_func indicates an optional completion function, to
-which term_func_priv will be passed, along with the error or amount written.
+In these functions, a pointer to the mapping to which the source pages are
+attached is passed in and start and len indicate the size of the region that's
+going to be written (it doesn't have to align to page boundaries necessarily,
+but it does have to align to DIO boundaries on the backing filesystem).  The
+caching parameter indicates if caching should be skipped, and if false, the
+functions do nothing.
+
+The write function takes some additional parameters: the cookie representing
+the cache object to be written to, i_size indicates the size of the netfs file
+and term_func indicates an optional completion function, to which
+term_func_priv will be passed, along with the error or amount written.
 
 Note that the write function will always run asynchronously and will unmark all
 the pages upon completion before calling term_func.
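
A hedged sketch of the flow these paragraphs describe, assuming the write
function referred to is fscache_write_to_cache() with this era's signature,
and that my_done and my_write_region are invented names::

        static void my_done(void *priv, ssize_t transferred_or_error, bool was_async)
        {
                /* By the time this runs, all marked pages have been unmarked. */
        }

        static void my_write_region(struct inode *inode, loff_t start, size_t len,
                                    bool caching)
        {
                fscache_write_to_cache(netfs_i_cookie(inode), inode->i_mapping,
                                       start, len, i_size_read(inode),
                                       my_done, NULL, caching);
        }

If an error occurs before the write is scheduled, the cleanup is the call
shown above: fscache_clear_page_bits(inode->i_mapping, start, len, caching).
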
index b0d354fd80666199b0f4f35deaebf06f015935fb..1af600db2e708eeb229d90efbaa04fa060c6b345 100644 (file)
@@ -82,10 +82,10 @@ Signing Update                 Supported.
 Pre-authentication integrity   Supported.
 SMB3 encryption(CCM, GCM)      Supported. (CCM and GCM128 supported, GCM256 in
                                progress)
-SMB direct(RDMA)               Partially Supported. SMB3 Multi-channel is
-                               required to connect to Windows client.
+SMB direct(RDMA)               Supported.
 SMB3 Multi-channel             Partially Supported. Planned to implement
                                replay/retry mechanisms for future.
+Receive Side Scaling mode      Supported.
 SMB3.1.1 POSIX extension       Supported.
 ACLs                           Partially Supported. only DACLs available, SACLs
                                (auditing) is planned for the future. For
index 1d831e3cbcb33d0623844d83bddc607501e66c0a..8cc536d08f51fec34cbea15f8283dda76cc72900 100644 (file)
@@ -549,7 +549,7 @@ Pagecache
 ~~~~~~~~~
 
 For filesystems using Linux's pagecache, the ``->readpage()`` and
-``->readpages()`` methods must be modified to verify pages before they
+``->readahead()`` methods must be modified to verify pages before they
 are marked Uptodate.  Merely hooking ``->read_iter()`` would be
 insufficient, since ``->read_iter()`` is not used for memory maps.
 
@@ -611,7 +611,7 @@ workqueue, and then the workqueue work does the decryption or
 verification.  Finally, pages where no decryption or verity error
 occurred are marked Uptodate, and the pages are unlocked.
 
-Files on ext4 and f2fs may contain holes.  Normally, ``->readpages()``
+Files on ext4 and f2fs may contain holes.  Normally, ``->readahead()``
 simply zeroes holes and sets the corresponding pages Uptodate; no bios
 are issued.  To prevent this case from bypassing fs-verity, these
 filesystems use fsverity_verify_page() to verify hole pages.
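
A hedged sketch of that hole-page rule (page_is_hole stands in for the
filesystem's own hole detection; the fsverity_* predicates are the kernel's
fs-verity API)::

        if (page_is_hole) {
                zero_user_segment(page, 0, PAGE_SIZE);
                if (!fsverity_active(inode) || fsverity_verify_page(page))
                        SetPageUptodate(page);
        }
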
@@ -778,7 +778,7 @@ weren't already directly answered in other parts of this document.
     - To prevent bypassing verification, pages must not be marked
       Uptodate until they've been verified.  Currently, each
       filesystem is responsible for marking pages Uptodate via
-      ``->readpages()``.  Therefore, currently it's not possible for
+      ``->readahead()``.  Therefore, currently it's not possible for
       the VFS to do the verification on its own.  Changing this would
       require significant changes to the VFS and all filesystems.
 
index 2998cec9af4b10f12ac88a1ed6f417ebb4aa075a..c26d854275a0ec399c610af4d356d0a9d9ef0e21 100644 (file)
@@ -241,8 +241,6 @@ prototypes::
        int (*writepages)(struct address_space *, struct writeback_control *);
        bool (*dirty_folio)(struct address_space *, struct folio *folio);
        void (*readahead)(struct readahead_control *);
-       int (*readpages)(struct file *filp, struct address_space *mapping,
-                       struct list_head *pages, unsigned nr_pages);
        int (*write_begin)(struct file *, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata);
@@ -274,7 +272,6 @@ readpage:           yes, unlocks                            shared
 writepages:
 dirty_folio            maybe
 readahead:             yes, unlocks                            shared
-readpages:             no                                      shared
 write_begin:           locks the page           exclusive
 write_end:             yes, unlocks             exclusive
 bmap:
@@ -300,9 +297,6 @@ completion.
 
 ->readahead() unlocks the pages that I/O is attempted on like ->readpage().
 
-->readpages() populates the pagecache with the passed pages and starts
-I/O against them.  They come unlocked upon I/O completion.
-
 ->writepage() is used for two purposes: for "memory cleansing" and for
 "sync".  These are quite different operations and the behaviour may differ
 depending upon the mode.
index 4f373a8ec47bc140aff7d0f9fd25efd63f0fa7aa..69f00179fdfeb60bb113a5582bb2cfb8a234a5e3 100644 (file)
@@ -7,6 +7,8 @@ Network Filesystem Helper Library
 .. Contents:
 
  - Overview.
+ - Per-inode context.
+   - Inode context helper functions.
  - Buffered read helpers.
    - Read helper functions.
    - Read helper structures.
@@ -28,6 +30,69 @@ Note that the library module doesn't link against local caching directly, so
 access must be provided by the netfs.
 
 
+Per-Inode Context
+=================
+
+The network filesystem helper library needs a place to store a bit of state for
+its use on each netfs inode it is helping to manage.  To this end, a context
+structure is defined::
+
+       struct netfs_i_context {
+               const struct netfs_request_ops *ops;
+               struct fscache_cookie   *cache;
+       };
+
+A network filesystem that wants to use netfs lib must place one of these
+directly after the VFS ``struct inode`` it allocates, usually as part of its
+own struct.  This can be done in a way similar to the following::
+
+       struct my_inode {
+               struct {
+                       /* These must be contiguous */
+                       struct inode            vfs_inode;
+                       struct netfs_i_context  netfs_ctx;
+               };
+               ...
+       };
+
+This allows netfslib to find its state by a simple offset from the inode
+pointer,
+thereby allowing the netfslib helper functions to be pointed to directly by the
+VFS/VM operation tables.
+
+The structure contains the following fields:
+
+ * ``ops``
+
+   The set of operations provided by the network filesystem to netfslib.
+
+ * ``cache``
+
+   Local caching cookie, or NULL if no caching is enabled.  This field does not
+   exist if fscache is disabled.
+
+
+Inode Context Helper Functions
+------------------------------
+
+To help deal with the per-inode context, a number of helper functions are
+provided.  Firstly, a function to perform basic initialisation on a context and
+set the operations table pointer::
+
+       void netfs_i_context_init(struct inode *inode,
+                                 const struct netfs_request_ops *ops);
+
+then two functions to cast between the VFS inode structure and the netfs
+context::
+
+       struct netfs_i_context *netfs_i_context(struct inode *inode);
+       struct inode *netfs_inode(struct netfs_i_context *ctx);
+
+and finally, a function to get the cache cookie pointer from the context
+attached to an inode (or NULL if fscache is disabled)::
+
+       struct fscache_cookie *netfs_i_cookie(struct inode *inode);
+
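+As an illustration, the initialiser might be called from the filesystem's
+inode allocation path (a greatly simplified, hypothetical sketch reusing the
+``my_inode`` layout above; ``my_netfs_ops`` is a placeholder operations
+table)::
+
+       static struct inode *my_alloc_inode(struct super_block *sb)
+       {
+               struct my_inode *mi = kzalloc(sizeof(*mi), GFP_KERNEL);
+
+               if (!mi)
+                       return NULL;
+               netfs_i_context_init(&mi->vfs_inode, &my_netfs_ops);
+               return &mi->vfs_inode;
+       }
+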
+
 Buffered Read Helpers
 =====================
 
@@ -70,38 +135,22 @@ Read Helper Functions
 
 Three read helpers are provided::
 
-       void netfs_readahead(struct readahead_control *ractl,
-                            const struct netfs_read_request_ops *ops,
-                            void *netfs_priv);
+       void netfs_readahead(struct readahead_control *ractl);
        int netfs_readpage(struct file *file,
-                          struct folio *folio,
-                          const struct netfs_read_request_ops *ops,
-                          void *netfs_priv);
+                          struct page *page);
        int netfs_write_begin(struct file *file,
                              struct address_space *mapping,
                              loff_t pos,
                              unsigned int len,
                              unsigned int flags,
                              struct folio **_folio,
-                             void **_fsdata,
-                             const struct netfs_read_request_ops *ops,
-                             void *netfs_priv);
-
-Each corresponds to a VM operation, with the addition of a couple of parameters
-for the use of the read helpers:
+                             void **_fsdata);
 
- * ``ops``
-
-   A table of operations through which the helpers can talk to the filesystem.
-
- * ``netfs_priv``
+Each corresponds to a VM address space operation.  These operations use the
+state in the per-inode context.
 
-   Filesystem private data (can be NULL).
-
-Both of these values will be stored into the read request structure.
-
-For ->readahead() and ->readpage(), the network filesystem should just jump
-into the corresponding read helper; whereas for ->write_begin(), it may be a
+For ->readahead() and ->readpage(), the network filesystem should just point directly
+at the corresponding read helper; whereas for ->write_begin(), it may be a
 little more complicated as the network filesystem might want to flush
 conflicting writes or track dirty data and needs to put the acquired folio if
 an error occurs after calling the helper.
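+
+For instance, the address_space operations of a filesystem using these helpers
+might look like this (a hedged sketch; ``my_write_begin`` stands for a
+hypothetical wrapper around netfs_write_begin())::
+
+       const struct address_space_operations my_aops = {
+               .readahead      = netfs_readahead,
+               .readpage       = netfs_readpage,
+               .write_begin    = my_write_begin,
+       };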
@@ -116,7 +165,7 @@ occurs, the request will get partially completed if sufficient data is read.
 
 Additionally, there is::
 
-  * void netfs_subreq_terminated(struct netfs_read_subrequest *subreq,
+  * void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
                                 ssize_t transferred_or_error,
                                 bool was_async);
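+
+For illustration, a network filesystem's read dispatcher might complete a
+subrequest like this once its transport finishes (a hedged sketch;
+``my_send_read_rpc()`` is a hypothetical function returning the number of
+bytes read or a negative error)::
+
+       static void my_issue_read(struct netfs_io_subrequest *subreq)
+       {
+               ssize_t ret = my_send_read_rpc(subreq);
+
+               netfs_subreq_terminated(subreq, ret, false);
+       }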
 
@@ -132,7 +181,7 @@ Read Helper Structures
 The read helpers make use of a couple of structures to maintain the state of
 the read.  The first is a structure that manages a read request as a whole::
 
-       struct netfs_read_request {
+       struct netfs_io_request {
                struct inode            *inode;
                struct address_space    *mapping;
                struct netfs_cache_resources cache_resources;
@@ -140,7 +189,7 @@ the read.  The first is a structure that manages a read request as a whole::
                loff_t                  start;
                size_t                  len;
                loff_t                  i_size;
-               const struct netfs_read_request_ops *netfs_ops;
+               const struct netfs_request_ops *netfs_ops;
                unsigned int            debug_id;
                ...
        };
@@ -187,8 +236,8 @@ The above fields are the ones the netfs can use.  They are:
 The second structure is used to manage individual slices of the overall read
 request::
 
-       struct netfs_read_subrequest {
-               struct netfs_read_request *rreq;
+       struct netfs_io_subrequest {
+               struct netfs_io_request *rreq;
                loff_t                  start;
                size_t                  len;
                size_t                  transferred;
@@ -244,32 +293,26 @@ Read Helper Operations
 The network filesystem must provide the read helpers with a table of operations
 through which it can issue requests and negotiate::
 
-       struct netfs_read_request_ops {
-               void (*init_rreq)(struct netfs_read_request *rreq, struct file *file);
-               bool (*is_cache_enabled)(struct inode *inode);
-               int (*begin_cache_operation)(struct netfs_read_request *rreq);
-               void (*expand_readahead)(struct netfs_read_request *rreq);
-               bool (*clamp_length)(struct netfs_read_subrequest *subreq);
-               void (*issue_op)(struct netfs_read_subrequest *subreq);
-               bool (*is_still_valid)(struct netfs_read_request *rreq);
+       struct netfs_request_ops {
+               void (*init_request)(struct netfs_io_request *rreq, struct file *file);
+               int (*begin_cache_operation)(struct netfs_io_request *rreq);
+               void (*expand_readahead)(struct netfs_io_request *rreq);
+               bool (*clamp_length)(struct netfs_io_subrequest *subreq);
+               void (*issue_read)(struct netfs_io_subrequest *subreq);
+               bool (*is_still_valid)(struct netfs_io_request *rreq);
                int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
                                         struct folio *folio, void **_fsdata);
-               void (*done)(struct netfs_read_request *rreq);
+               void (*done)(struct netfs_io_request *rreq);
                void (*cleanup)(struct address_space *mapping, void *netfs_priv);
        };
 
 The operations are as follows:
 
- * ``init_rreq()``
+ * ``init_request()``
 
    [Optional] This is called to initialise the request structure.  It is given
    the file for reference and can modify the ->netfs_priv value.
 
- * ``is_cache_enabled()``
-
-   [Required] This is called by netfs_write_begin() to ask if the file is being
-   cached.  It should return true if it is being cached and false otherwise.
-
  * ``begin_cache_operation()``
 
    [Optional] This is called to ask the network filesystem to call into the
@@ -305,7 +348,7 @@ The operations are as follows:
 
    This should return 0 on success and an error code on error.
 
- * ``issue_op()``
+ * ``issue_read()``
 
    [Required] The helpers use this to dispatch a subrequest to the server for
    reading.  In the subrequest, ->start, ->len and ->transferred indicate what
@@ -420,12 +463,12 @@ The network filesystem's ->begin_cache_operation() method is called to set up a
 cache and this must call into the cache to do the work.  If using fscache, for
 example, the cache would call::
 
-       int fscache_begin_read_operation(struct netfs_read_request *rreq,
+       int fscache_begin_read_operation(struct netfs_io_request *rreq,
                                         struct fscache_cookie *cookie);
 
 passing in the request pointer and the cookie corresponding to the file.
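+
+A network filesystem using fscache might therefore implement this operation as
+(a hedged sketch, assuming the cookie kept in the inode's netfs context)::
+
+       static int my_begin_cache_operation(struct netfs_io_request *rreq)
+       {
+               return fscache_begin_read_operation(rreq,
+                                                   netfs_i_cookie(rreq->inode));
+       }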
 
-The netfs_read_request object contains a place for the cache to hang its
+The netfs_io_request object contains a place for the cache to hang its
 state::
 
        struct netfs_cache_resources {
@@ -443,7 +486,7 @@ operation table looks like the following::
                void (*expand_readahead)(struct netfs_cache_resources *cres,
                                         loff_t *_start, size_t *_len, loff_t i_size);
 
-               enum netfs_read_source (*prepare_read)(struct netfs_read_subrequest *subreq,
+               enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
                                                       loff_t i_size);
 
                int (*read)(struct netfs_cache_resources *cres,
@@ -562,4 +605,5 @@ API Function Reference
 ======================
 
 .. kernel-doc:: include/linux/netfs.h
-.. kernel-doc:: fs/netfs/read_helper.c
+.. kernel-doc:: fs/netfs/buffered_read.c
+.. kernel-doc:: fs/netfs/io.c
index 4f14edf93941e96683f876240b64c7b421bcf8a1..794bd1a66bfbdfed8e4b95c003ac7a6a90de1b9e 100644 (file)
@@ -726,8 +726,6 @@ cache in your filesystem.  The following members are defined:
                int (*writepages)(struct address_space *, struct writeback_control *);
                bool (*dirty_folio)(struct address_space *, struct folio *);
                void (*readahead)(struct readahead_control *);
-               int (*readpages)(struct file *filp, struct address_space *mapping,
-                                struct list_head *pages, unsigned nr_pages);
                int (*write_begin)(struct file *, struct address_space *mapping,
                                   loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata);
@@ -817,15 +815,6 @@ cache in your filesystem.  The following members are defined:
        completes successfully.  Setting PageError on any page will be
        ignored; simply unlock the page if an I/O error occurs.
 
-``readpages``
-       called by the VM to read pages associated with the address_space
-       object.  This is essentially just a vector version of readpage.
-       Instead of just one page, several pages are requested.
-       readpages is only used for read-ahead, so read errors are
-       ignored.  If anything goes wrong, feel free to give up.
-       This interface is deprecated and will be removed by the end of
-       2020; implement readahead instead.
-
 ``write_begin``
        Called by the generic buffered write code to ask the filesystem
        to prepare to write len bytes at the given offset in the file.
index 2d1fc03d346ea7c044f945017e6ca74a50b58092..ef19b9c1352362e7d7c01773650b20d9fd7a5bd2 100644 (file)
@@ -77,6 +77,17 @@ HOSTLDLIBS
 ----------
 Additional libraries to link against when building host programs.
 
+.. _userkbuildflags:
+
+USERCFLAGS
+----------
+Additional options used for $(CC) when compiling userprogs.
+
+USERLDFLAGS
+-----------
+Additional options used for $(LD) when linking userprogs.  Note that userprogs
+are linked with $(CC), so $(USERLDFLAGS) should include the "-Wl," prefix where
+applicable.
+
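+For example, to compile userprogs with warnings treated as errors and link
+them statically (illustrative values)::
+
+       make USERCFLAGS=-Werror USERLDFLAGS=-static
+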
 KBUILD_KCONFIG
 --------------
 Set the top-level Kconfig file to the value of this environment
index d32616891dcfb23dd2a77e525ab316502c501a3f..b854bb4131647dc1ff311dcb699fe3b5a8685a7e 100644 (file)
@@ -49,17 +49,36 @@ example: ::
 LLVM Utilities
 --------------
 
-LLVM has substitutes for GNU binutils utilities. Kbuild supports ``LLVM=1``
-to enable them. ::
-
-       make LLVM=1
-
-They can be enabled individually. The full list of the parameters: ::
+LLVM has substitutes for GNU binutils utilities. They can be enabled individually.
+The full list of supported make variables::
 
        make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \
          OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump READELF=llvm-readelf \
          HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar HOSTLD=ld.lld
 
+To simplify the above command, Kbuild supports the ``LLVM`` variable::
+
+       make LLVM=1
+
+If your LLVM tools are not available in your PATH, you can supply their
+location using the LLVM variable with a trailing slash::
+
+       make LLVM=/path/to/llvm/
+
+which will use ``/path/to/llvm/clang``, ``/path/to/llvm/ld.lld``, etc.
+
+If your LLVM tools have a version suffix and you want to test with that
+explicit version rather than the unsuffixed executables that ``LLVM=1`` would
+use, you can pass the suffix using the ``LLVM`` variable::
+
+       make LLVM=-14
+
+which will use ``clang-14``, ``ld.lld-14``, etc.
+
+``LLVM=0`` is not the same as omitting ``LLVM`` altogether; it will behave like
+``LLVM=1``.  If you only wish to use certain LLVM utilities, use their
+respective make variables.
+
 The integrated assembler is enabled by default. You can pass ``LLVM_IAS=0`` to
 disable it.
 
index b008b90b92c9fcb279a4e08ad2e3763ab67091a3..11a296e52d680014f131e0e57fb9bba60fefb3aa 100644 (file)
@@ -982,6 +982,8 @@ The syntax is quite similar. The difference is to use "userprogs" instead of
 
        When linking bpfilter_umh, it will be passed the extra option -static.
 
+       From the command line, :ref:`USERCFLAGS and USERLDFLAGS <userkbuildflags>` will also be used.
+
 5.4 When userspace programs are actually built
 ----------------------------------------------
 
index bfa75ea1b66a3c305ec0b68eb91e95bfc2e935ac..9933faad47714c2cc79a91e7f5016df169a6c374 100644 (file)
@@ -211,9 +211,6 @@ raw_spinlock_t and spinlock_t
 raw_spinlock_t
 --------------
 
-raw_spinlock_t is a strict spinning lock implementation regardless of the
-kernel configuration including PREEMPT_RT enabled kernels.
-
 raw_spinlock_t is a strict spinning lock implementation in all kernels,
 including PREEMPT_RT kernels.  Use raw_spinlock_t only in real critical
 core code, low-level interrupt handling and places where disabling
index f0a60435b1240b65a4f24625f6aaa63199607a82..3e03283c144ea0be8e7957818eed6bbd85fa80c2 100644 (file)
@@ -12,6 +12,7 @@ additions to this manual.
    configure-git
    rebasing-and-merging
    pull-requests
+   messy-diffstat
    maintainer-entry-profile
    modifying-patches
 
diff --git a/Documentation/maintainer/messy-diffstat.rst b/Documentation/maintainer/messy-diffstat.rst
new file mode 100644 (file)
index 0000000..c015f66
--- /dev/null
@@ -0,0 +1,96 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================================
+Handling messy pull-request diffstats
+=====================================
+
+Subsystem maintainers routinely use ``git request-pull`` as part of the
+process of sending work upstream.  Normally, the result includes a nice
+diffstat that shows which files will be touched and how much of each will
+be changed.  Occasionally, though, a repository with a relatively
+complicated development history will yield a massive diffstat containing a
+great deal of unrelated work.  The result looks ugly and obscures what the
+pull request is actually doing.  This document describes what is happening
+and how to fix things up; it is derived from The Wisdom of Linus Torvalds,
+found in Linus1_ and Linus2_.
+
+.. _Linus1: https://lore.kernel.org/lkml/CAHk-=wg3wXH2JNxkQi+eLZkpuxqV+wPiHhw_Jf7ViH33Sw7PHA@mail.gmail.com/
+.. _Linus2: https://lore.kernel.org/lkml/CAHk-=wgXbSa8yq8Dht8at+gxb_idnJ7X5qWZQWRBN4_CUPr=eQ@mail.gmail.com/
+
+A Git development history proceeds as a series of commits.  In a simplified
+manner, mainline kernel development looks like this::
+
+  ... vM --- vN-rc1 --- vN-rc2 --- vN-rc3 --- ... --- vN-rc7 --- vN
+
+If one wants to see what has changed between two points, a command like
+this will do the job::
+
+  $ git diff --stat --summary vN-rc2..vN-rc3
+
+Here, there are two clear points in the history; Git will essentially
+"subtract" the beginning point from the end point and display the resulting
+differences.  The requested operation is unambiguous and easy enough to
+understand.
+
+When a subsystem maintainer creates a branch and commits changes to it, the
+result in the simplest case is a history that looks like::
+
+  ... vM --- vN-rc1 --- vN-rc2 --- vN-rc3 --- ... --- vN-rc7 --- vN
+                          |
+                          +-- c1 --- c2 --- ... --- cN
+
+If that maintainer now uses ``git diff`` to see what has changed between
+the mainline branch (let's call it "linus") and cN, there are still two
+clear endpoints, and the result is as expected.  So a pull request
+generated with ``git request-pull`` will also be as expected.  But now
+consider a slightly more complex development history::
+
+  ... vM --- vN-rc1 --- vN-rc2 --- vN-rc3 --- ... --- vN-rc7 --- vN
+                |         |
+                |         +-- c1 --- c2 --- ... --- cN
+                |                   /
+                +-- x1 --- x2 --- x3
+
+Our maintainer has created one branch at vN-rc1 and another at vN-rc2; the
+two were subsequently merged into c2.  Now a pull request generated
+for cN may end up being messy indeed, and developers often end up wondering
+why.
+
+What is happening here is that there are no longer two clear end points for
+the ``git diff`` operation to use.  The development culminating in cN
+started in two different places; to generate the diffstat, ``git diff``
+ends up having to pick one of them and hope for the best.  If the diffstat
+starts at vN-rc1, it may end up including all of the changes between there
+and the second origin end point (vN-rc2), which is certainly not what our
+maintainer had in mind.  With all of that extra junk in the diffstat, it
+may be impossible to tell what actually happened in the changes leading up
+to cN.
+
+Maintainers often try to resolve this problem by, for example, rebasing the
+branch or performing another merge with the linus branch, then recreating
+the pull request.  This approach tends not to lead to joy at the receiving
+end of that pull request; rebasing and/or merging just before pushing
+upstream is a well-known way to get a grumpy response.
+
+So what is to be done?  The best response when confronted with this
+situation is indeed to do a merge with the branch you intend your work
+to be pulled into, but to do it privately, as if it were a source of
+shame.  Create a new, throwaway branch and do the merge there::
+
+  ... vM --- vN-rc1 --- vN-rc2 --- vN-rc3 --- ... --- vN-rc7 --- vN
+                |         |                                      |
+                |         +-- c1 --- c2 --- ... --- cN           |
+                |                   /               |            |
+                +-- x1 --- x2 --- x3                +------------+-- TEMP
+
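+In command form, that might look like this (assuming the work is on a branch
+called ``work`` and mainline is tracked as ``linus``)::
+
+  $ git checkout -b TEMP work
+  $ git merge linus
+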
+The merge operation resolves all of the complications resulting from the
+multiple beginning points, yielding a coherent result that contains only
+the differences from the mainline branch.  Now it will be possible to
+generate a diffstat with the desired information::
+
+  $ git diff -C --stat --summary linus..TEMP
+
+Save the output from this command, then simply delete the TEMP branch;
+definitely do not expose it to the outside world.  Take the saved diffstat
+output and edit it into the messy pull request, yielding a result that
+shows what is really going on.  That request can then be sent upstream.
index 525e6842dd3392a529672fbdf0b396b5cfaa5cb6..43be3782e5dfe857f00bfa448e14df5d11e0289d 100644 (file)
@@ -894,7 +894,7 @@ xmit_hash_policy
                Uses XOR of hardware MAC addresses and packet type ID
                field to generate the hash. The formula is
 
-               hash = source MAC XOR destination MAC XOR packet type ID
+               hash = source MAC[5] XOR destination MAC[5] XOR packet type ID
                slave number = hash modulo slave count
 
                This algorithm will place all traffic to a particular
@@ -910,7 +910,7 @@ xmit_hash_policy
                Uses XOR of hardware MAC addresses and IP addresses to
                generate the hash.  The formula is
 
-               hash = source MAC XOR destination MAC XOR packet type ID
+               hash = source MAC[5] XOR destination MAC[5] XOR packet type ID
                hash = hash XOR source IP XOR destination IP
                hash = hash XOR (hash RSHIFT 16)
                hash = hash XOR (hash RSHIFT 8)
index b0024aa7b0514f7174ebf7512e2c7da256b494d1..66828293d9cb715391459d4dc37868217da55113 100644 (file)
@@ -267,6 +267,13 @@ ipfrag_max_dist - INTEGER
        from different IP datagrams, which could result in data corruption.
        Default: 64
 
+bc_forwarding - INTEGER
+       bc_forwarding enables the feature described in rfc1812#section-5.3.5.2
+       and rfc2644.  It allows the router to forward directed broadcasts.
+       To enable this feature, the 'all' entry and the input interface entry
+       should be set to 1.
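+       For example (``eth0`` stands in for the actual input interface)::
+
+           # sysctl -w net.ipv4.conf.all.bc_forwarding=1
+           # sysctl -w net.ipv4.conf.eth0.bc_forwarding=1
+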
+       Default: 0
+
 INET peer storage
 =================
 
index ea915c1960488a416a696bd1aac2b80b871865d1..e23b876ad6ebb6b2c571cacfac873f542bfb5862 100644 (file)
@@ -7,7 +7,6 @@ RISC-V architecture
 
     boot-image-header
     vm-layout
-    pmu
     patch-acceptance
 
     features
index 4392b3cb40206fe7632a3022b6296d2e253655ee..b5feb5b1d9054899db0216529c48d6142796c258 100644 (file)
@@ -128,6 +128,7 @@ class KernelCmd(Directive):
         return out
 
     def nestedParse(self, lines, fname):
+        env = self.state.document.settings.env
         content = ViewList()
         node = nodes.section()
 
@@ -137,7 +138,7 @@ class KernelCmd(Directive):
                 code_block += "\n    " + l
             lines = code_block + "\n\n"
 
-        line_regex = re.compile("^#define LINENO (\S+)\#([0-9]+)$")
+        line_regex = re.compile("^\.\. LINENO (\S+)\#([0-9]+)$")
         ln = 0
         n = 0
         f = fname
@@ -154,6 +155,9 @@ class KernelCmd(Directive):
                     self.do_parse(content, node)
                     content = ViewList()
 
+                    # Add the file to Sphinx build dependencies
+                    env.note_dependency(os.path.abspath(f))
+
                 f = new_f
 
                 # sphinx counts lines from 0
index 8138d69a6987ad43f42fe0f0a13fcb483c592f89..27b701ed3681ed430aa421985d41fb54c7873fc1 100644 (file)
@@ -33,6 +33,7 @@ u"""
 
 import codecs
 import os
+import re
 import subprocess
 import sys
 
@@ -82,7 +83,7 @@ class KernelFeat(Directive):
 
         env = doc.settings.env
         cwd = path.dirname(doc.current_source)
-        cmd = "get_feat.pl rest --dir "
+        cmd = "get_feat.pl rest --enable-fname --dir "
         cmd += self.arguments[0]
 
         if len(self.arguments) > 1:
@@ -102,7 +103,22 @@ class KernelFeat(Directive):
         shell_env["srctree"] = srctree
 
         lines = self.runCmd(cmd, shell=True, cwd=cwd, env=shell_env)
-        nodeList = self.nestedParse(lines, fname)
+
+        line_regex = re.compile("^\.\. FILE (\S+)$")
+
+        out_lines = ""
+
+        for line in lines.split("\n"):
+            match = line_regex.search(line)
+            if match:
+                fname = match.group(1)
+
+                # Add the file to Sphinx build dependencies
+                env.note_dependency(os.path.abspath(fname))
+            else:
+                out_lines += line + "\n"
+
+        nodeList = self.nestedParse(out_lines, fname)
         return nodeList
 
     def runCmd(self, cmd, **kwargs):
index f523aa68a36b397b3c6f12998ec597c28e7ad69a..abe7680883771d49b72d7f382b45ca3e2514df7f 100755 (executable)
@@ -59,6 +59,7 @@ class KernelInclude(Include):
     u"""KernelInclude (``kernel-include``) directive"""
 
     def run(self):
+        env = self.state.document.settings.env
         path = os.path.realpath(
             os.path.expandvars(self.arguments[0]))
 
@@ -70,6 +71,8 @@ class KernelInclude(Include):
 
         self.arguments[0] = path
 
+        env.note_dependency(os.path.abspath(path))
+
         #return super(KernelInclude, self).run() # won't work, see HINTs in _run()
         return self._run()
 
index 8189c33b9ddafd540d38c58290d07ce9248a1b2a..9395892c7ba38b1956815b30c997f5d8d0ec898c 100644 (file)
@@ -130,7 +130,7 @@ class KernelDocDirective(Directive):
             result = ViewList()
 
             lineoffset = 0;
-            line_regex = re.compile("^#define LINENO ([0-9]+)$")
+            line_regex = re.compile("^\.\. LINENO ([0-9]+)$")
             for line in lines:
                 match = line_regex.search(line)
                 if match:
index 24d2b2addcce3d225fd735371bda9c1b4c3b81ba..cefdbb7e75230be7c30dac216cdb7169cf01aaf1 100644 (file)
@@ -212,7 +212,7 @@ def setupTools(app):
         if convert_cmd:
             kernellog.verbose(app, "use convert(1) from: " + convert_cmd)
         else:
-            kernellog.warn(app,
+            kernellog.verbose(app,
                 "Neither inkscape(1) nor convert(1) found.\n"
                 "For SVG to PDF conversion, "
                 "install either Inkscape (https://inkscape.org/) (preferred) or\n"
@@ -296,8 +296,10 @@ def convert_image(img_node, translator, src_fname=None):
 
         if translator.builder.format == 'latex':
             if not inkscape_cmd and convert_cmd is None:
-                kernellog.verbose(app,
-                                  "no SVG to PDF conversion available / include SVG raw.")
+                kernellog.warn(app,
+                                  "no SVG to PDF conversion available / include SVG raw."
+                                  "\nIncluding large raw SVGs can cause xelatex error."
+                                  "\nInstall Inkscape (preferred) or ImageMagick.")
                 img_node.replace_self(file2literal(src_fname))
             else:
                 dst_fname = path.join(translator.builder.outdir, fname + '.pdf')
index 9a35f50798a65f3c8a1f928ee46e724493a4f4b4..2c573541ab712fad55ed105aefd5adca30dcf757 100644 (file)
@@ -1,2 +1,4 @@
+# jinja2>=3.1 is not compatible with Sphinx<4.0
+jinja2<3.1
 sphinx_rtd_theme
 Sphinx==2.4.4
index bddedabaca80f6cd63dc817a8fc34814343dd570..c180936f49fc8b2c964a1264831192e720085b15 100644 (file)
@@ -7,7 +7,7 @@ user_events: User-based Event Tracing
 Overview
 --------
 User based trace events allow user processes to create events and trace data
-that can be viewed via existing tools, such as ftrace, perf and eBPF.
+that can be viewed via existing tools, such as ftrace and perf.
 To enable this feature, build your kernel with CONFIG_USER_EVENTS=y.
 
 Programs can view status of the events via
@@ -67,8 +67,7 @@ The command string format is as follows::
 
 Supported Flags
 ^^^^^^^^^^^^^^^
-**BPF_ITER** - EBPF programs attached to this event will get the raw iovec
-struct instead of any data copies for max performance.
+None yet
 
 Field Format
 ^^^^^^^^^^^^
@@ -160,7 +159,7 @@ The following values are defined to aid in checking what has been attached:
 
 **EVENT_STATUS_FTRACE** - Bit set if ftrace has been attached (Bit 0).
 
-**EVENT_STATUS_PERF** - Bit set if perf/eBPF has been attached (Bit 1).
+**EVENT_STATUS_PERF** - Bit set if perf has been attached (Bit 1).
 
 Writing Data
 ------------
@@ -204,13 +203,6 @@ It's advised for user programs to do the following::
 
 **NOTE:** *The write_index is not emitted out into the trace being recorded.*
 
-EBPF
-----
-EBPF programs that attach to a user-based event tracepoint are given a pointer
-to a struct user_bpf_context. The bpf context contains the data type (which can
-be a user or kernel buffer, or can be a pointer to the iovec) and the data
-length that was emitted (minus the write_index).
-
 Example Code
 ------------
 See sample code in samples/user_events.
index 07a45474abe905ac2dfeb11e37d2eb513efff550..85c7abc51af5216ba15b7cf8706948679ded39b6 100644 (file)
@@ -151,12 +151,6 @@ In order to create user controlled virtual machines on S390, check
 KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as
 privileged user (CAP_SYS_ADMIN).
 
-To use hardware assisted virtualization on MIPS (VZ ASE) rather than
-the default trap & emulate implementation (which changes the virtual
-memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the
-flag KVM_VM_MIPS_VZ.
-
-
 On arm64, the physical address size for a VM (IPA Size limit) is limited
 to 40bits by default. The limit can be configured if the host supports the
 extension KVM_CAP_ARM_VM_IPA_SIZE. When supported, use
@@ -4081,6 +4075,11 @@ x2APIC MSRs are always allowed, independent of the ``default_allow`` setting,
 and their behavior depends on the ``X2APIC_ENABLE`` bit of the APIC base
 register.
 
+.. warning::
+   MSR accesses coming from nested vmentry/vmexit are not filtered.
+   This includes both writes to individual VMCS fields and reads/writes
+   through the MSR lists pointed to by the VMCS.
+
 If a bit is within one of the defined ranges, read and write accesses are
 guarded by the bitmap's value for the MSR index if the kind of access
 is included in the ``struct kvm_msr_filter_range`` flags.  If no range
@@ -5293,6 +5292,10 @@ type values:
 
 KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO
   Sets the guest physical address of the vcpu_info for a given vCPU.
+  As with the shared_info page for the VM, the corresponding page may be
+  dirtied at any time if event channel interrupt delivery is enabled, so
+  userspace should always assume that the page is dirty without relying
+  on dirty logging.
 
 KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO
   Sets the guest physical address of an additional pvclock structure
@@ -6187,6 +6190,7 @@ Valid values for 'type' are:
                        unsigned long args[6];
                        unsigned long ret[2];
                } riscv_sbi;
+
 If exit reason is KVM_EXIT_RISCV_SBI then it indicates that the VCPU has
 done a SBI call which is not handled by KVM RISC-V kernel module. The details
 of the SBI call are available in 'riscv_sbi' member of kvm_run structure. The
@@ -7719,3 +7723,49 @@ only be invoked on a VM prior to the creation of VCPUs.
 At this time, KVM_PMU_CAP_DISABLE is the only capability.  Setting
 this capability will disable PMU virtualization for that VM.  Usermode
 should adjust CPUID leaf 0xA to reflect that the PMU is disabled.
+
+9. Known KVM API problems
+=========================
+
+In some cases, KVM's API has some inconsistencies or common pitfalls
+that userspace needs to be aware of.  This section details some of
+these issues.
+
+Most of them are architecture specific, so the section is split by
+architecture.
+
+9.1. x86
+--------
+
+``KVM_GET_SUPPORTED_CPUID`` issues
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In general, ``KVM_GET_SUPPORTED_CPUID`` is designed so that it is possible
+to take its result and pass it directly to ``KVM_SET_CPUID2``.  This section
+documents some cases in which that requires some care.
+
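+A minimal usage sketch (error handling omitted; ``kvm_fd`` is the ``/dev/kvm``
+file descriptor, ``vcpu_fd`` a vCPU file descriptor, and ``cpuid`` a suitably
+sized ``struct kvm_cpuid2`` buffer)::
+
+       ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
+       /* adjust entries as described below, if needed */
+       ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
+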
+Local APIC features
+~~~~~~~~~~~~~~~~~~~
+
+CPU[EAX=1]:ECX[21] (X2APIC) is reported by ``KVM_GET_SUPPORTED_CPUID``,
+but it can only be enabled if ``KVM_CREATE_IRQCHIP`` or
+``KVM_ENABLE_CAP(KVM_CAP_IRQCHIP_SPLIT)`` is used to enable in-kernel emulation of
+the local APIC.
+
+The same is true for the ``KVM_FEATURE_PV_UNHALT`` paravirtualized feature.
+
+CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by ``KVM_GET_SUPPORTED_CPUID``.
+It can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER`` is present and the kernel
+has enabled in-kernel emulation of the local APIC.
+
+Obsolete ioctls and capabilities
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+KVM_CAP_DISABLE_QUIRKS does not let userspace know which quirks are actually
+available.  Use ``KVM_CHECK_EXTENSION(KVM_CAP_DISABLE_QUIRKS2)`` instead if
+available.
+
+Ordering of KVM_GET_*/KVM_SET_* ioctls
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+TBD
index b6833c7bb474192b1ac78b26ab073c9bfee5ada1..e0a2c74e1043a09bc7a0c912c1215d8ffba8f1ff 100644 (file)
@@ -8,25 +8,13 @@ KVM
    :maxdepth: 2
 
    api
-   amd-memory-encryption
-   cpuid
-   halt-polling
-   hypercalls
-   locking
-   mmu
-   msr
-   nested-vmx
-   ppc-pv
-   s390-diag
-   s390-pv
-   s390-pv-boot
-   timekeeping
-   vcpu-requests
-
-   review-checklist
+   devices/index
 
    arm/index
+   s390/index
+   ppc-pv
+   x86/index
 
-   devices/index
-
-   running-nested-guests
+   locking
+   vcpu-requests
+   review-checklist
index 5d27da356836106efd78b3cb54582a8a811b252d..845a561629f19ddf236d13ec27c8df1ce9029637 100644 (file)
@@ -210,32 +210,47 @@ time it will be set using the Dirty tracking mechanism described above.
 3. Reference
 ------------
 
-:Name:         kvm_lock
+``kvm_lock``
+^^^^^^^^^^^^
+
 :Type:         mutex
 :Arch:         any
 :Protects:     - vm_list
 
-:Name:         kvm_count_lock
+``kvm_count_lock``
+^^^^^^^^^^^^^^^^^^
+
 :Type:         raw_spinlock_t
 :Arch:         any
 :Protects:     - hardware virtualization enable/disable
 :Comment:      'raw' because hardware enabling/disabling must be atomic /wrt
                migration.
 
-:Name:         kvm_arch::tsc_write_lock
-:Type:         raw_spinlock
+``kvm->mn_invalidate_lock``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+:Type:          spinlock_t
+:Arch:          any
+:Protects:      mn_active_invalidate_count, mn_memslots_update_rcuwait
+
+``kvm_arch::tsc_write_lock``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+:Type:         raw_spinlock_t
 :Arch:         x86
 :Protects:     - kvm_arch::{last_tsc_write,last_tsc_nsec,last_tsc_offset}
                - tsc offset in vmcb
 :Comment:      'raw' because updating the tsc offsets must not be preempted.
 
-:Name:         kvm->mmu_lock
-:Type:         spinlock_t
+``kvm->mmu_lock``
+^^^^^^^^^^^^^^^^^
+:Type:         spinlock_t or rwlock_t
 :Arch:         any
 :Protects:     -shadow page/shadow tlb entry
 :Comment:      it is a spinlock since it is used in mmu notifier.
 
-:Name:         kvm->srcu
+``kvm->srcu``
+^^^^^^^^^^^^^
 :Type:         srcu lock
 :Arch:         any
 :Protects:     - kvm->memslots
@@ -246,10 +261,20 @@ time it will be set using the Dirty tracking mechanism described above.
                The srcu index can be stored in kvm_vcpu->srcu_idx per vcpu
                if it is needed by multiple functions.
 
-:Name:         blocked_vcpu_on_cpu_lock
+``kvm->slots_arch_lock``
+^^^^^^^^^^^^^^^^^^^^^^^^
+:Type:          mutex
+:Arch:          any (only needed on x86 though)
+:Protects:      any arch-specific fields of memslots that have to be modified
+                in a ``kvm->srcu`` read-side critical section.
+:Comment:       must be held before reading the pointer to the current memslots,
+                until after all changes to the memslots are complete
+
+``wakeup_vcpus_on_cpu_lock``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 :Type:         spinlock_t
 :Arch:         x86
-:Protects:     blocked_vcpu_on_cpu
+:Protects:     wakeup_vcpus_on_cpu
 :Comment:      This is a per-CPU lock and it is used for VT-d posted-interrupts.
                When VT-d posted-interrupts is supported and the VM has assigned
                devices, we put the blocked vCPU on the list blocked_vcpu_on_cpu
diff --git a/Documentation/virt/kvm/s390/index.rst b/Documentation/virt/kvm/s390/index.rst
new file mode 100644 (file)
index 0000000..605f488
--- /dev/null
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================
+KVM for s390 systems
+====================
+
+.. toctree::
+   :maxdepth: 2
+
+   s390-diag
+   s390-pv
+   s390-pv-boot
index b61d48aec36c5e975cc572e096f1fc3a2390c931..31f62b64e07b9f75c2be45ad28c133e53bf9ad04 100644 (file)
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 =================
 KVM VCPU Requests
 =================
@@ -135,6 +137,16 @@ KVM_REQ_UNHALT
   such as a pending signal, which does not indicate the VCPU's halt
   emulation should stop, and therefore does not make the request.
 
+KVM_REQ_OUTSIDE_GUEST_MODE
+
+  This "request" ensures the target vCPU has exited guest mode prior to the
+  sender of the request continuing on.  No action needs be taken by the target,
+  and so no request is actually logged for the target.  This request is similar
+  to a "kick", but unlike a kick it guarantees the vCPU has actually exited
+  guest mode.  A kick only guarantees the vCPU will exit at some point in the
+  future, e.g. a previous kick may have started the process, but there's no
+  guarantee the to-be-kicked vCPU has fully exited guest mode.
+
 KVM_REQUEST_MASK
 ----------------
 
similarity index 99%
rename from Documentation/virt/kvm/amd-memory-encryption.rst
rename to Documentation/virt/kvm/x86/amd-memory-encryption.rst
index 1c6847fff304975a1be800d2e0c335a7cc819c59..2d307811978c45ea57067163d364ee86680c5971 100644 (file)
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 ======================================
 Secure Encrypted Virtualization (SEV)
 ======================================
diff --git a/Documentation/virt/kvm/x86/errata.rst b/Documentation/virt/kvm/x86/errata.rst
new file mode 100644 (file)
index 0000000..410e0aa
--- /dev/null
@@ -0,0 +1,39 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================================
+Known limitations of CPU virtualization
+=======================================
+
+Whenever perfect emulation of a CPU feature is impossible or too hard, KVM
+has to choose between not implementing the feature at all or introducing
+behavioral differences between virtual machines and bare metal systems.
+
+This file documents some of the known limitations that KVM has in
+virtualizing CPU features.
+
+x86
+===
+
+``KVM_GET_SUPPORTED_CPUID`` issues
+----------------------------------
+
+x87 features
+~~~~~~~~~~~~
+
+Unlike most other CPUID feature bits, CPUID[EAX=7,ECX=0]:EBX[6]
+(FDP_EXCPTN_ONLY) and CPUID[EAX=7,ECX=0]:EBX[13] (ZERO_FCS_FDS) are
+clear if the features are present and set if the features are not present.
+
+Clearing these bits in CPUID has no effect on the operation of the guest;
+if these bits are set on hardware, the features will not be present on
+any virtual machine that runs on that hardware.
+
+**Workaround:** It is recommended to always set these bits in guest CPUID.
+Note however that any software (e.g. ``WIN87EM.DLL``) expecting these features
+to be present likely predates these CPUID feature bits, and therefore
+doesn't know to check for them anyway.
+
+Nested virtualization features
+------------------------------
+
+TBD
diff --git a/Documentation/virt/kvm/x86/index.rst b/Documentation/virt/kvm/x86/index.rst
new file mode 100644 (file)
index 0000000..7ff5888
--- /dev/null
@@ -0,0 +1,19 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================
+KVM for x86 systems
+===================
+
+.. toctree::
+   :maxdepth: 2
+
+   amd-memory-encryption
+   cpuid
+   errata
+   halt-polling
+   hypercalls
+   mmu
+   msr
+   nested-vmx
+   running-nested-guests
+   timekeeping
similarity index 99%
rename from Documentation/virt/kvm/running-nested-guests.rst
rename to Documentation/virt/kvm/x86/running-nested-guests.rst
index bd70c69468aebb236937232d41adce56654e3149..a27e6768d9008f7f01029b3dd4862943de9d9f13 100644 (file)
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 ==============================
 Running nested guests with KVM
 ==============================
index d5ad96c795f6b7b54ea96a6466896c66f6e63183..863f67b72c05d80af82287603c9304236a11fabc 100644 (file)
@@ -1193,6 +1193,26 @@ E.g. ``os_close_file()`` is just a wrapper around ``close()``
 which ensures that the userspace function close does not clash
 with similarly named function(s) in the kernel part.
 
+Using UML as a Test Platform
+============================
+
+UML is an excellent test platform for device driver development. As
+with most things UML, "some user assembly may be required". It is
+up to the user to build their emulation environment. UML at present
+provides only the kernel infrastructure.
+
+Part of this infrastructure is the ability to load and parse fdt
+device tree blobs as used in Arm or Open Firmware platforms. These
+are supplied as an optional extra argument to the kernel command
+line::
+
+    dtb=filename
+
+The device tree is loaded and parsed at boot time and is accessible by
+drivers which query it.  At present this facility is intended solely
+for development purposes.  UML's own devices do not
+query the device tree.
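+
+For example, an illustrative invocation (file names are placeholders)::
+
+    ./linux mem=256M ubd0=rootfs.img dtb=board.dtb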
+
 Security Considerations
 -----------------------
 
index c4de6f8dabe994388d6d3307f397c4609b43ac30..65204d7f004f238f37b0654cd91bcc2033fdf7a1 100644 (file)
@@ -125,7 +125,6 @@ Usage
    additional function:
 
        Cull:
-               -c              Cull by comparing stacktrace instead of total block.
                --cull <rules>
                                Specify culling rules.  Culling syntax is key[,key[,...]].  Choose a
                                multi-letter key from the **STANDARD FORMAT SPECIFIERS** section.
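+
+                                For example, a hedged invocation (file names are
+                                placeholders; keys come from the **STANDARD FORMAT
+                                SPECIFIERS** section)::
+
+                                        ./page_owner_sort --cull=stacktrace page_owner.txt sorted.txt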
index eae3af17f2d9b6830f7d7c2fab4fe77791a58801..b280367d6a44c321e76f7396b6e7e520cda50aa1 100644 (file)
@@ -52,8 +52,13 @@ The infrastructure may also be able to handle other conditions that make pages
 unevictable, either by definition or by circumstance, in the future.
 
 
-The Unevictable Page List
--------------------------
+The Unevictable LRU Page List
+-----------------------------
+
+The Unevictable LRU page list is a lie.  It was never an LRU-ordered list, but a
+companion to the LRU-ordered anonymous and file, active and inactive page lists;
+and now it is not even a page list.  But following familiar convention, here in
+this document and in the source, we often imagine it as a fifth LRU page list.
 
 The Unevictable LRU infrastructure consists of an additional, per-node, LRU list
 called the "unevictable" list and an associated page flag, PG_unevictable, to
@@ -63,8 +68,8 @@ The PG_unevictable flag is analogous to, and mutually exclusive with, the
 PG_active flag in that it indicates on which LRU list a page resides when
 PG_lru is set.
 
-The Unevictable LRU infrastructure maintains unevictable pages on an additional
-LRU list for a few reasons:
+The Unevictable LRU infrastructure maintains unevictable pages as if they were
+on an additional LRU list for a few reasons:
 
  (1) We get to "treat unevictable pages just like we treat other pages in the
      system - which means we get to use the same code to manipulate them, the
@@ -72,13 +77,11 @@ LRU list for a few reasons:
      of the statistics, etc..." [Rik van Riel]
 
  (2) We want to be able to migrate unevictable pages between nodes for memory
-     defragmentation, workload management and memory hotplug.  The linux kernel
+     defragmentation, workload management and memory hotplug.  The Linux kernel
      can only migrate pages that it can successfully isolate from the LRU
-     lists.  If we were to maintain pages elsewhere than on an LRU-like list,
-     where they can be found by isolate_lru_page(), we would prevent their
-     migration, unless we reworked migration code to find the unevictable pages
-     itself.
-
+     lists (or "Movable" pages: outside of consideration here).  If we were to
+     maintain pages elsewhere than on an LRU-like list, where they can be
+     detected by isolate_lru_page(), we would prevent their migration.
 
 The unevictable list does not differentiate between file-backed and anonymous,
 swap-backed pages.  This differentiation is only important while the pages are,
@@ -92,8 +95,8 @@ Memory Control Group Interaction
 --------------------------------
 
 The unevictable LRU facility interacts with the memory control group [aka
-memory controller; see Documentation/admin-guide/cgroup-v1/memory.rst] by extending the
-lru_list enum.
+memory controller; see Documentation/admin-guide/cgroup-v1/memory.rst] by
+extending the lru_list enum.
 
 The memory controller data structure automatically gets a per-node unevictable
 list as a result of the "arrayification" of the per-node LRU lists (one per
@@ -143,7 +146,6 @@ These are currently used in three places in the kernel:
      and this mark remains for the life of the inode.
 
  (2) By SYSV SHM to mark SHM_LOCK'd address spaces until SHM_UNLOCK is called.
-
      Note that SHM_LOCK is not required to page in the locked pages if they're
      swapped out; the application must touch the pages manually if it wants to
      ensure they're in memory.
@@ -156,19 +158,19 @@ These are currently used in three places in the kernel:
 Detecting Unevictable Pages
 ---------------------------
 
-The function page_evictable() in vmscan.c determines whether a page is
+The function page_evictable() in mm/internal.h determines whether a page is
 evictable or not using the query function outlined above [see section
 :ref:`Marking address spaces unevictable <mark_addr_space_unevict>`]
 to check the AS_UNEVICTABLE flag.
 
 For address spaces that are so marked after being populated (as SHM regions
-might be), the lock action (eg: SHM_LOCK) can be lazy, and need not populate
+might be), the lock action (e.g. SHM_LOCK) can be lazy, and need not populate
 the page tables for the region as does, for example, mlock(), nor need it make
 any special effort to push any pages in the SHM_LOCK'd area to the unevictable
 list.  Instead, vmscan will do this if and when it encounters the pages during
 a reclamation scan.
 
-On an unlock action (such as SHM_UNLOCK), the unlocker (eg: shmctl()) must scan
+On an unlock action (such as SHM_UNLOCK), the unlocker (e.g. shmctl()) must scan
 the pages in the region and "rescue" them from the unevictable list if no other
 condition is keeping them unevictable.  If an unevictable region is destroyed,
 the pages are also "rescued" from the unevictable list in the process of
@@ -176,7 +178,7 @@ freeing them.
 
 page_evictable() also checks for mlocked pages by testing an additional page
 flag, PG_mlocked (as wrapped by PageMlocked()), which is set when a page is
-faulted into a VM_LOCKED vma, or found in a vma being VM_LOCKED.
+faulted into a VM_LOCKED VMA, or found in a VMA being VM_LOCKED.
 
 
 Vmscan's Handling of Unevictable Pages
@@ -186,28 +188,23 @@ If unevictable pages are culled in the fault path, or moved to the unevictable
 list at mlock() or mmap() time, vmscan will not encounter the pages until they
 have become evictable again (via munlock() for example) and have been "rescued"
 from the unevictable list.  However, there may be situations where we decide,
-for the sake of expediency, to leave a unevictable page on one of the regular
+for the sake of expediency, to leave an unevictable page on one of the regular
 active/inactive LRU lists for vmscan to deal with.  vmscan checks for such
 pages in all of the shrink_{active|inactive|page}_list() functions and will
 "cull" such pages that it encounters: that is, it diverts those pages to the
-unevictable list for the node being scanned.
+unevictable list for the memory cgroup and node being scanned.
 
 There may be situations where a page is mapped into a VM_LOCKED VMA, but the
 page is not marked as PG_mlocked.  Such pages will make it all the way to
-shrink_page_list() where they will be detected when vmscan walks the reverse
-map in try_to_unmap().  If try_to_unmap() returns SWAP_MLOCK,
-shrink_page_list() will cull the page at that point.
+shrink_active_list() or shrink_page_list() where they will be detected when
+vmscan walks the reverse map in page_referenced() or try_to_unmap().  The page
+is culled to the unevictable list when it is released by the shrinker.
 
 To "cull" an unevictable page, vmscan simply puts the page back on the LRU list
 using putback_lru_page() - the inverse operation to isolate_lru_page() - after
 dropping the page lock.  Because the condition which makes the page unevictable
-may change once the page is unlocked, putback_lru_page() will recheck the
-unevictable state of a page that it places on the unevictable list.  If the
-page has become unevictable, putback_lru_page() removes it from the list and
-retries, including the page_unevictable() test.  Because such a race is a rare
-event and movement of pages onto the unevictable list should be rare, these
-extra evictabilty checks should not occur in the majority of calls to
-putback_lru_page().
+may change once the page is unlocked, __pagevec_lru_add_fn() will recheck the
+unevictable state of a page before placing it on the unevictable list.
 
 
 MLOCKED Pages
@@ -227,16 +224,25 @@ Nick posted his patch as an alternative to a patch posted by Christoph Lameter
 to achieve the same objective: hiding mlocked pages from vmscan.
 
 In Nick's patch, he used one of the struct page LRU list link fields as a count
-of VM_LOCKED VMAs that map the page.  This use of the link field for a count
-prevented the management of the pages on an LRU list, and thus mlocked pages
-were not migratable as isolate_lru_page() could not find them, and the LRU list
-link field was not available to the migration subsystem.
+of VM_LOCKED VMAs that map the page (Rik van Riel had the same idea three years
+earlier).  But this use of the link field for a count prevented the management
+of the pages on an LRU list, and thus mlocked pages were not migratable as
+isolate_lru_page() could not detect them, and the LRU list link field was not
+available to the migration subsystem.
 
-Nick resolved this by putting mlocked pages back on the lru list before
+Nick resolved this by putting mlocked pages back on the LRU list before
 attempting to isolate them, thus abandoning the count of VM_LOCKED VMAs.  When
 Nick's patch was integrated with the Unevictable LRU work, the count was
-replaced by walking the reverse map to determine whether any VM_LOCKED VMAs
-mapped the page.  More on this below.
+replaced by walking the reverse map when munlocking, to determine whether any
+other VM_LOCKED VMAs still mapped the page.
+
+However, walking the reverse map for each page when munlocking was ugly and
+inefficient, and could lead to catastrophic contention on a file's rmap lock,
+when many processes which had it mlocked were trying to exit.  In 5.18, the
+idea of keeping mlock_count in the Unevictable LRU list link field was revived and
+put to work, without preventing the migration of mlocked pages.  This is why
+the "Unevictable LRU list" cannot be a linked list of pages now; but there was
+no use for that linked list anyway - though its size is maintained for meminfo.
 
 
 Basic Management
@@ -250,22 +256,18 @@ PageMlocked() functions.
 A PG_mlocked page will be placed on the unevictable list when it is added to
 the LRU.  Such pages can be "noticed" by memory management in several places:
 
- (1) in the mlock()/mlockall() system call handlers;
+ (1) in the mlock()/mlock2()/mlockall() system call handlers;
 
  (2) in the mmap() system call handler when mmapping a region with the
      MAP_LOCKED flag;
 
  (3) mmapping a region in a task that has called mlockall() with the MCL_FUTURE
-     flag
+     flag;
 
- (4) in the fault path, if mlocked pages are "culled" in the fault path,
-     and when a VM_LOCKED stack segment is expanded; or
+ (4) in the fault path and when a VM_LOCKED stack segment is expanded; or
 
  (5) as mentioned above, in vmscan:shrink_page_list() when attempting to
-     reclaim a page in a VM_LOCKED VMA via try_to_unmap()
-
-all of which result in the VM_LOCKED flag being set for the VMA if it doesn't
-already have it set.
+     reclaim a page in a VM_LOCKED VMA by page_referenced() or try_to_unmap().
 
 mlocked pages become unlocked and rescued from the unevictable list when:
 
@@ -280,51 +282,53 @@ mlocked pages become unlocked and rescued from the unevictable list when:
  (4) before a page is COW'd in a VM_LOCKED VMA.
 
 
-mlock()/mlockall() System Call Handling
----------------------------------------
+mlock()/mlock2()/mlockall() System Call Handling
+------------------------------------------------
 
-Both [do\_]mlock() and [do\_]mlockall() system call handlers call mlock_fixup()
+mlock(), mlock2() and mlockall() system call handlers proceed to mlock_fixup()
 for each VMA in the range specified by the call.  In the case of mlockall(),
 this is the entire active address space of the task.  Note that mlock_fixup()
 is used for both mlocking and munlocking a range of memory.  A call to mlock()
-an already VM_LOCKED VMA, or to munlock() a VMA that is not VM_LOCKED is
-treated as a no-op, and mlock_fixup() simply returns.
+an already VM_LOCKED VMA, or to munlock() a VMA that is not VM_LOCKED, is
+treated as a no-op and mlock_fixup() simply returns.
 
-If the VMA passes some filtering as described in "Filtering Special Vmas"
+If the VMA passes some filtering as described in "Filtering Special VMAs"
 below, mlock_fixup() will attempt to merge the VMA with its neighbors or split
-off a subset of the VMA if the range does not cover the entire VMA.  Once the
-VMA has been merged or split or neither, mlock_fixup() will call
-populate_vma_page_range() to fault in the pages via get_user_pages() and to
-mark the pages as mlocked via mlock_vma_page().
+off a subset of the VMA if the range does not cover the entire VMA.  Any pages
+already present in the VMA are then marked as mlocked by mlock_page() via
+mlock_pte_range() via walk_page_range() via mlock_vma_pages_range().
+
+Before returning from the system call, do_mlock() or mlockall() will call
+__mm_populate() to fault in the remaining pages via get_user_pages() and to
+mark those pages as mlocked as they are faulted.
 
 Note that the VMA being mlocked might be mapped with PROT_NONE.  In this case,
 get_user_pages() will be unable to fault in the pages.  That's okay.  If pages
-do end up getting faulted into this VM_LOCKED VMA, we'll handle them in the
-fault path or in vmscan.
-
-Also note that a page returned by get_user_pages() could be truncated or
-migrated out from under us, while we're trying to mlock it.  To detect this,
-populate_vma_page_range() checks page_mapping() after acquiring the page lock.
-If the page is still associated with its mapping, we'll go ahead and call
-mlock_vma_page().  If the mapping is gone, we just unlock the page and move on.
-In the worst case, this will result in a page mapped in a VM_LOCKED VMA
-remaining on a normal LRU list without being PageMlocked().  Again, vmscan will
-detect and cull such pages.
-
-mlock_vma_page() will call TestSetPageMlocked() for each page returned by
-get_user_pages().  We use TestSetPageMlocked() because the page might already
-be mlocked by another task/VMA and we don't want to do extra work.  We
-especially do not want to count an mlocked page more than once in the
-statistics.  If the page was already mlocked, mlock_vma_page() need do nothing
-more.
-
-If the page was NOT already mlocked, mlock_vma_page() attempts to isolate the
-page from the LRU, as it is likely on the appropriate active or inactive list
-at that time.  If the isolate_lru_page() succeeds, mlock_vma_page() will put
-back the page - by calling putback_lru_page() - which will notice that the page
-is now mlocked and divert the page to the node's unevictable list.  If
-mlock_vma_page() is unable to isolate the page from the LRU, vmscan will handle
-it later if and when it attempts to reclaim the page.
+do end up getting faulted into this VM_LOCKED VMA, they will be handled in the
+fault path - which is also how mlock2()'s MLOCK_ONFAULT areas are handled.
+
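+A hedged userspace illustration of such an area (error handling omitted)::
+
+       /* lock pages only as they are faulted in, not up front */
+       mlock2(addr, len, MLOCK_ONFAULT);
+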
+For each PTE (or PMD) being faulted into a VMA, the page add rmap function
+calls mlock_vma_page(), which calls mlock_page() when the VMA is VM_LOCKED
+(unless it is a PTE mapping of a part of a transparent huge page).  Or when
+it is a newly allocated anonymous page, lru_cache_add_inactive_or_unevictable()
+calls mlock_new_page() instead: similar to mlock_page(), but can make better
+judgments, since this page is held exclusively and known not to be on LRU yet.
+
+mlock_page() sets PageMlocked immediately, then places the page on the CPU's
+mlock pagevec, to batch up the rest of the work to be done under lru_lock by
+__mlock_page().  __mlock_page() sets PageUnevictable, initializes mlock_count
+and moves the page to unevictable state ("the unevictable LRU", but with
+mlock_count in place of LRU threading).  Or if the page was already PageLRU
+and PageUnevictable and PageMlocked, it simply increments the mlock_count.
+
+But in practice that may not work ideally: the page may not yet be on an LRU, or
+it may have been temporarily isolated from LRU.  In such cases the mlock_count
+field cannot be touched, but will be set to 0 later when __pagevec_lru_add_fn()
+returns the page to "LRU".  Races prohibit mlock_count from being set to 1 then:
+rather than risk stranding a page indefinitely as unevictable, always err with
+mlock_count on the low side, so that when munlocked the page will be rescued to
+an evictable LRU, then perhaps be mlocked again later if vmscan finds it in a
+VM_LOCKED VMA.
 
 
 Filtering Special VMAs
@@ -339,68 +343,48 @@ mlock_fixup() filters several classes of "special" VMAs:
    so there is no sense in attempting to visit them.
 
 2) VMAs mapping hugetlbfs pages are already effectively pinned into memory.  We
-   neither need nor want to mlock() these pages.  However, to preserve the
-   prior behavior of mlock() - before the unevictable/mlock changes -
-   mlock_fixup() will call make_pages_present() in the hugetlbfs VMA range to
-   allocate the huge pages and populate the ptes.
+   neither need nor want to mlock() these pages.  But __mm_populate() includes
+   hugetlbfs ranges, allocating the huge pages and populating the PTEs.
 
 3) VMAs with VM_DONTEXPAND are generally userspace mappings of kernel pages,
-   such as the VDSO page, relay channel pages, etc. These pages
-   are inherently unevictable and are not managed on the LRU lists.
-   mlock_fixup() treats these VMAs the same as hugetlbfs VMAs.  It calls
-   make_pages_present() to populate the ptes.
+   such as the VDSO page, relay channel pages, etc.  These pages are inherently
+   unevictable and are not managed on the LRU lists.  __mm_populate() includes
+   these ranges, populating the PTEs if not already populated.
+
+4) VMAs with VM_MIXEDMAP set are not marked VM_LOCKED, but __mm_populate()
+   includes these ranges, populating the PTEs if not already populated.
 
 Note that for all of these special VMAs, mlock_fixup() does not set the
 VM_LOCKED flag.  Therefore, we won't have to deal with them later during
 munlock(), munmap() or task exit.  Neither does mlock_fixup() account these
 VMAs against the task's "locked_vm".
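+
+The combined effect of this filtering can be modelled in a few lines.  This is
+an illustrative sketch only - the flag values below are invented for the
+sketch and are not the kernel's definitions::
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    /* Toy flag values, invented for this sketch. */
+    #define VM_IO         0x1UL
+    #define VM_PFNMAP     0x2UL
+    #define VM_DONTEXPAND 0x4UL
+    #define VM_MIXEDMAP   0x8UL
+    #define VM_HUGETLB    0x10UL
+
+    #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
+
+    /* "Special" and hugetlb VMAs are never marked VM_LOCKED or accounted. */
+    static bool mlock_skips(unsigned long vm_flags)
+    {
+            return (vm_flags & VM_SPECIAL) || (vm_flags & VM_HUGETLB);
+    }
+
+    int main(void)
+    {
+            printf("%d %d\n", mlock_skips(VM_IO), mlock_skips(0)); /* 1 0 */
+            return 0;
+    }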
 
-.. _munlock_munlockall_handling:
 
 munlock()/munlockall() System Call Handling
 -------------------------------------------
 
-The munlock() and munlockall() system calls are handled by the same functions -
-do_mlock[all]() - as the mlock() and mlockall() system calls with the unlock vs
-lock operation indicated by an argument.  So, these system calls are also
-handled by mlock_fixup().  Again, if called for an already munlocked VMA,
-mlock_fixup() simply returns.  Because of the VMA filtering discussed above,
-VM_LOCKED will not be set in any "special" VMAs.  So, these VMAs will be
-ignored for munlock.
+The munlock() and munlockall() system calls are handled by the same
+mlock_fixup() function as the mlock(), mlock2() and mlockall() system calls.
+If called to munlock an already munlocked VMA, mlock_fixup() simply returns.
+Because of the VMA filtering discussed above, VM_LOCKED will not be set in
+any "special" VMAs.  So, those VMAs will be ignored for munlock.
 
 If the VMA is VM_LOCKED, mlock_fixup() again attempts to merge or split off the
-specified range.  The range is then munlocked via the function
-populate_vma_page_range() - the same function used to mlock a VMA range -
-passing a flag to indicate that munlock() is being performed.
-
-Because the VMA access protections could have been changed to PROT_NONE after
-faulting in and mlocking pages, get_user_pages() was unreliable for visiting
-these pages for munlocking.  Because we don't want to leave pages mlocked,
-get_user_pages() was enhanced to accept a flag to ignore the permissions when
-fetching the pages - all of which should be resident as a result of previous
-mlocking.
-
-For munlock(), populate_vma_page_range() unlocks individual pages by calling
-munlock_vma_page().  munlock_vma_page() unconditionally clears the PG_mlocked
-flag using TestClearPageMlocked().  As with mlock_vma_page(),
-munlock_vma_page() use the Test*PageMlocked() function to handle the case where
-the page might have already been unlocked by another task.  If the page was
-mlocked, munlock_vma_page() updates that zone statistics for the number of
-mlocked pages.  Note, however, that at this point we haven't checked whether
-the page is mapped by other VM_LOCKED VMAs.
-
-We can't call page_mlock(), the function that walks the reverse map to
-check for other VM_LOCKED VMAs, without first isolating the page from the LRU.
-page_mlock() is a variant of try_to_unmap() and thus requires that the page
-not be on an LRU list [more on these below].  However, the call to
-isolate_lru_page() could fail, in which case we can't call page_mlock().  So,
-we go ahead and clear PG_mlocked up front, as this might be the only chance we
-have.  If we can successfully isolate the page, we go ahead and call
-page_mlock(), which will restore the PG_mlocked flag and update the zone
-page statistics if it finds another VMA holding the page mlocked.  If we fail
-to isolate the page, we'll have left a potentially mlocked page on the LRU.
-This is fine, because we'll catch it later if and if vmscan tries to reclaim
-the page.  This should be relatively rare.
+specified range.  All pages in the VMA are then munlocked by munlock_page() via
+mlock_pte_range() via walk_page_range() via mlock_vma_pages_range() - the same
+function used when mlocking a VMA range, with new flags for the VMA indicating
+that munlock() is being performed.
+
+munlock_page() uses the mlock pagevec to batch up work to be done under
+lru_lock by __munlock_page().  __munlock_page() decrements the page's
+mlock_count, and when that reaches 0 it clears PageMlocked and clears
+PageUnevictable, moving the page from unevictable state to inactive LRU.
+
+But in practice that may not work ideally: the page may not yet have reached
+"the unevictable LRU", or it may have been temporarily isolated from it.  In
+those cases its mlock_count field is unusable and must be assumed to be 0: so
+that the page will be rescued to an evictable LRU, then perhaps be mlocked
+again later if vmscan finds it in a VM_LOCKED VMA.
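+
+The user-visible effect can be observed from the VmLck field of
+/proc/self/status.  A hedged user-space illustration - not kernel code; it
+assumes /proc is mounted and a sufficient RLIMIT_MEMLOCK::
+
+    #include <sys/mman.h>
+    #include <stdio.h>
+    #include <string.h>
+
+    static void show_vmlck(void)
+    {
+            char line[128];
+            FILE *f = fopen("/proc/self/status", "r");
+
+            if (!f)
+                    return;
+            while (fgets(line, sizeof(line), f))
+                    if (!strncmp(line, "VmLck:", 6))
+                            fputs(line, stdout);
+            fclose(f);
+    }
+
+    int main(void)
+    {
+            static char buf[1 << 20];
+
+            if (mlock(buf, sizeof(buf)))
+                    perror("mlock");
+            show_vmlck();           /* VmLck grows by about 1024 kB */
+
+            munlock(buf, sizeof(buf));
+            show_vmlck();           /* VmLck shrinks again */
+            return 0;
+    }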
 
 
 Migrating MLOCKED Pages
@@ -410,33 +394,38 @@ A page that is being migrated has been isolated from the LRU lists and is held
 locked across unmapping of the page, updating the page's address space entry
 and copying the contents and state, until the page table entry has been
 replaced with an entry that refers to the new page.  Linux supports migration
-of mlocked pages and other unevictable pages.  This involves simply moving the
-PG_mlocked and PG_unevictable states from the old page to the new page.
+of mlocked pages and other unevictable pages.  PG_mlocked is cleared from the
+old page when it is unmapped from the last VM_LOCKED VMA, and set when the new
+page is mapped in place of the migration entry in a VM_LOCKED VMA.  If the page
+was unevictable because mlocked, PG_unevictable follows PG_mlocked; but if the
+page was unevictable for other reasons, PG_unevictable is copied explicitly.
 
 Note that page migration can race with mlocking or munlocking of the same page.
-This has been discussed from the mlock/munlock perspective in the respective
-sections above.  Both processes (migration and m[un]locking) hold the page
-locked.  This provides the first level of synchronization.  Page migration
-zeros out the page_mapping of the old page before unlocking it, so m[un]lock
-can skip these pages by testing the page mapping under page lock.
+There is mostly no problem since page migration requires unmapping all PTEs of
+the old page (including munlock where VM_LOCKED), then mapping in the new page
+(including mlock where VM_LOCKED).  The page table locks provide sufficient
+synchronization.
 
-To complete page migration, we place the new and old pages back onto the LRU
-after dropping the page lock.  The "unneeded" page - old page on success, new
-page on failure - will be freed when the reference count held by the migration
-process is released.  To ensure that we don't strand pages on the unevictable
-list because of a race between munlock and migration, page migration uses the
-putback_lru_page() function to add migrated pages back to the LRU.
+However, since mlock_vma_pages_range() starts by setting VM_LOCKED on a VMA,
+before mlocking any pages already present, if one of those pages were migrated
+before mlock_pte_range() reached it, it would get counted twice in mlock_count.
+To prevent that, mlock_vma_pages_range() temporarily marks the VMA as VM_IO,
+so that mlock_vma_page() will skip it.
+
+To complete page migration, we place the old and new pages back onto the LRU
+afterwards.  The "unneeded" page - old page on success, new page on failure -
+is freed when the reference count held by the migration process is released.
 
 
 Compacting MLOCKED Pages
 ------------------------
 
-The unevictable LRU can be scanned for compactable regions and the default
-behavior is to do so.  /proc/sys/vm/compact_unevictable_allowed controls
-this behavior (see Documentation/admin-guide/sysctl/vm.rst).  Once scanning of the
-unevictable LRU is enabled, the work of compaction is mostly handled by
-the page migration code and the same work flow as described in MIGRATING
-MLOCKED PAGES will apply.
+The memory map can be scanned for compactable regions and the default behavior
+is to let unevictable pages be moved.  /proc/sys/vm/compact_unevictable_allowed
+controls this behavior (see Documentation/admin-guide/sysctl/vm.rst).  The work
+of compaction is mostly handled by the page migration code and the same work
+flow as described in Migrating MLOCKED Pages will apply.
+
 
 MLOCKING Transparent Huge Pages
 -------------------------------
@@ -445,51 +434,44 @@ A transparent huge page is represented by a single entry on an LRU list.
 Therefore, we can only make unevictable an entire compound page, not
 individual subpages.
 
-If a user tries to mlock() part of a huge page, we want the rest of the
-page to be reclaimable.
+If a user tries to mlock() part of a huge page, and no user mlock()s the
+whole of the huge page, we want the rest of the page to be reclaimable.
 
 We cannot just split the page on partial mlock() as split_huge_page() can
-fail and new intermittent failure mode for the syscall is undesirable.
+fail, and a new intermittent failure mode for the syscall is undesirable.
 
-We handle this by keeping PTE-mapped huge pages on normal LRU lists: the
-PMD on border of VM_LOCKED VMA will be split into PTE table.
+We handle this by keeping PTE-mlocked huge pages on evictable LRU lists:
+the PMD on the border of a VM_LOCKED VMA will be split into a PTE table.
 
-This way the huge page is accessible for vmscan. Under memory pressure the
+This way the huge page is accessible for vmscan.  Under memory pressure the
 page will be split, subpages which belong to VM_LOCKED VMAs will be moved
-to unevictable LRU and the rest can be reclaimed.
+to the unevictable LRU and the rest can be reclaimed.
+
+/proc/meminfo's Unevictable and Mlocked amounts do not include those parts
+of a transparent huge page which are mapped only by PTEs in VM_LOCKED VMAs.
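+
+Those counters can be read directly from /proc/meminfo; a minimal sketch,
+assuming /proc is mounted::
+
+    #include <stdio.h>
+    #include <string.h>
+
+    int main(void)
+    {
+            char line[128];
+            FILE *f = fopen("/proc/meminfo", "r");
+
+            if (!f)
+                    return 1;
+            while (fgets(line, sizeof(line), f))
+                    if (!strncmp(line, "Unevictable:", 12) ||
+                        !strncmp(line, "Mlocked:", 8))
+                            fputs(line, stdout);
+            fclose(f);
+            return 0;
+    }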
 
-See also comment in follow_trans_huge_pmd().
 
 mmap(MAP_LOCKED) System Call Handling
 -------------------------------------
 
-In addition the mlock()/mlockall() system calls, an application can request
-that a region of memory be mlocked supplying the MAP_LOCKED flag to the mmap()
-call. There is one important and subtle difference here, though. mmap() + mlock()
-will fail if the range cannot be faulted in (e.g. because mm_populate fails)
-and returns with ENOMEM while mmap(MAP_LOCKED) will not fail. The mmaped
-area will still have properties of the locked area - aka. pages will not get
-swapped out - but major page faults to fault memory in might still happen.
+In addition to the mlock(), mlock2() and mlockall() system calls, an application
+can request that a region of memory be mlocked by supplying the MAP_LOCKED flag
+to the mmap() call.  There is one important and subtle difference here, though.
+mmap() + mlock() will fail if the range cannot be faulted in (e.g. because
+mm_populate() fails), returning ENOMEM, while mmap(MAP_LOCKED) will not fail.
+The mmapped area will still have the properties of a locked area - pages will
+not get swapped out - but major page faults to bring the memory in might still
+happen.
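+
+A minimal sketch of the difference - illustrative only, with error handling
+abbreviated::
+
+    #define _GNU_SOURCE
+    #include <sys/mman.h>
+    #include <stdio.h>
+
+    int main(void)
+    {
+            size_t len = 1 << 20;
+
+            /* mmap(MAP_LOCKED): the mapping itself succeeds even if
+             * populating it fails; major faults may still occur later. */
+            void *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
+                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);
+            if (a == MAP_FAILED)
+                    perror("mmap(MAP_LOCKED)");
+
+            /* mmap() + mlock(): mlock() fails with ENOMEM if the range
+             * cannot be faulted in. */
+            void *b = mmap(NULL, len, PROT_READ | PROT_WRITE,
+                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+            if (b != MAP_FAILED && mlock(b, len))
+                    perror("mlock");
+            return 0;
+    }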
 
-Furthermore, any mmap() call or brk() call that expands the heap by a
-task that has previously called mlockall() with the MCL_FUTURE flag will result
+Furthermore, any mmap() call or brk() call that expands the heap by a task
+that has previously called mlockall() with the MCL_FUTURE flag will result
 in the newly mapped memory being mlocked.  Before the unevictable/mlock
-changes, the kernel simply called make_pages_present() to allocate pages and
-populate the page table.
+changes, the kernel simply called make_pages_present() to allocate pages
+and populate the page table.
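+
+For example - a sketch, assuming a sufficient RLIMIT_MEMLOCK or
+CAP_IPC_LOCK::
+
+    #include <sys/mman.h>
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    int main(void)
+    {
+            if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
+                    perror("mlockall");
+                    return 1;
+            }
+
+            /* Expanding the heap now yields already-mlocked memory. */
+            void *p = malloc(1 << 20);
+
+            free(p);
+            munlockall();
+            return 0;
+    }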
 
-To mlock a range of memory under the unevictable/mlock infrastructure, the
-mmap() handler and task address space expansion functions call
+To mlock a range of memory under the unevictable/mlock infrastructure,
+the mmap() handler and task address space expansion functions call
 populate_vma_page_range() specifying the vma and the address range to mlock.
 
-The callers of populate_vma_page_range() will have already added the memory range
-to be mlocked to the task's "locked_vm".  To account for filtered VMAs,
-populate_vma_page_range() returns the number of pages NOT mlocked.  All of the
-callers then subtract a non-negative return value from the task's locked_vm.  A
-negative return value represent an error - for example, from get_user_pages()
-attempting to fault in a VMA with PROT_NONE access.  In this case, we leave the
-memory range accounted as locked_vm, as the protections could be changed later
-and pages allocated into that region.
-
 
 munmap()/exit()/exec() System Call Handling
 -------------------------------------------
@@ -500,81 +482,53 @@ munlock the pages if we're removing the last VM_LOCKED VMA that maps the pages.
 Before the unevictable/mlock changes, mlocking did not mark the pages in any
 way, so unmapping them required no processing.
 
-To munlock a range of memory under the unevictable/mlock infrastructure, the
-munmap() handler and task address space call tear down function
-munlock_vma_pages_all().  The name reflects the observation that one always
-specifies the entire VMA range when munlock()ing during unmap of a region.
-Because of the VMA filtering when mlocking() regions, only "normal" VMAs that
-actually contain mlocked pages will be passed to munlock_vma_pages_all().
-
-munlock_vma_pages_all() clears the VM_LOCKED VMA flag and, like mlock_fixup()
-for the munlock case, calls __munlock_vma_pages_range() to walk the page table
-for the VMA's memory range and munlock_vma_page() each resident page mapped by
-the VMA.  This effectively munlocks the page, only if this is the last
-VM_LOCKED VMA that maps the page.
-
-
-try_to_unmap()
---------------
-
-Pages can, of course, be mapped into multiple VMAs.  Some of these VMAs may
-have VM_LOCKED flag set.  It is possible for a page mapped into one or more
-VM_LOCKED VMAs not to have the PG_mlocked flag set and therefore reside on one
-of the active or inactive LRU lists.  This could happen if, for example, a task
-in the process of munlocking the page could not isolate the page from the LRU.
-As a result, vmscan/shrink_page_list() might encounter such a page as described
-in section "vmscan's handling of unevictable pages".  To handle this situation,
-try_to_unmap() checks for VM_LOCKED VMAs while it is walking a page's reverse
-map.
-
-try_to_unmap() is always called, by either vmscan for reclaim or for page
-migration, with the argument page locked and isolated from the LRU.  Separate
-functions handle anonymous and mapped file and KSM pages, as these types of
-pages have different reverse map lookup mechanisms, with different locking.
-In each case, whether rmap_walk_anon() or rmap_walk_file() or rmap_walk_ksm(),
-it will call try_to_unmap_one() for every VMA which might contain the page.
-
-When trying to reclaim, if try_to_unmap_one() finds the page in a VM_LOCKED
-VMA, it will then mlock the page via mlock_vma_page() instead of unmapping it,
-and return SWAP_MLOCK to indicate that the page is unevictable: and the scan
-stops there.
-
-mlock_vma_page() is called while holding the page table's lock (in addition
-to the page lock, and the rmap lock): to serialize against concurrent mlock or
-munlock or munmap system calls, mm teardown (munlock_vma_pages_all), reclaim,
-holepunching, and truncation of file pages and their anonymous COWed pages.
-
-
-page_mlock() Reverse Map Scan
----------------------------------
-
-When munlock_vma_page() [see section :ref:`munlock()/munlockall() System Call
-Handling <munlock_munlockall_handling>` above] tries to munlock a
-page, it needs to determine whether or not the page is mapped by any
-VM_LOCKED VMA without actually attempting to unmap all PTEs from the
-page.  For this purpose, the unevictable/mlock infrastructure
-introduced a variant of try_to_unmap() called page_mlock().
-
-page_mlock() walks the respective reverse maps looking for VM_LOCKED VMAs. When
-such a VMA is found the page is mlocked via mlock_vma_page(). This undoes the
-pre-clearing of the page's PG_mlocked done by munlock_vma_page.
-
-Note that page_mlock()'s reverse map walk must visit every VMA in a page's
-reverse map to determine that a page is NOT mapped into any VM_LOCKED VMA.
-However, the scan can terminate when it encounters a VM_LOCKED VMA.
-Although page_mlock() might be called a great many times when munlocking a
-large region or tearing down a large address space that has been mlocked via
-mlockall(), overall this is a fairly rare event.
+For each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls
+munlock_vma_page(), which calls munlock_page() when the VMA is VM_LOCKED
+(unless it was a PTE mapping of a part of a transparent huge page).
+
+munlock_page() uses the mlock pagevec to batch up work to be done under
+lru_lock by __munlock_page().  __munlock_page() decrements the page's
+mlock_count, and when that reaches 0 it clears PageMlocked and clears
+PageUnevictable, moving the page from unevictable state to inactive LRU.
+
+But in practice that may not work ideally: the page may not yet have reached
+"the unevictable LRU", or it may have been temporarily isolated from it.  In
+those cases its mlock_count field is unusable and must be assumed to be 0: so
+that the page will be rescued to an evictable LRU, then perhaps be mlocked
+again later if vmscan finds it in a VM_LOCKED VMA.
+
+
+Truncating MLOCKED Pages
+------------------------
+
+File truncation or hole punching forcibly unmaps the deleted pages from
+userspace; truncation even unmaps and deletes any private anonymous pages
+which had been Copied-On-Write from the file pages now being truncated.
+
+Mlocked pages can be munlocked and deleted in this way: like with munmap(),
+for each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls
+munlock_vma_page(), which calls munlock_page() when the VMA is VM_LOCKED
+(unless it was a PTE mapping of a part of a transparent huge page).
+
+However, there is a race with munlock(): mlock_vma_pages_range() starts
+munlocking by clearing VM_LOCKED from the VMA, before munlocking all the
+pages present.  If one of those pages is unmapped by truncation or hole punch
+before mlock_pte_range() reaches it, it is not recognized as mlocked by this
+VMA and is not counted out of mlock_count.  In this rare case, a page may
+still appear as PageMlocked after it has been fully unmapped; it is left to
+release_pages() (or __page_cache_release()) to clear it and update statistics
+before freeing (this event is counted in /proc/vmstat unevictable_pgs_cleared,
+which is usually 0).
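+
+A hedged user-space illustration of munlock by hole punch - it assumes the
+Linux-specific memfd_create() and fallocate() hole punching, 4 KiB pages, and
+abbreviates error handling::
+
+    #define _GNU_SOURCE
+    #include <sys/mman.h>
+    #include <fcntl.h>
+    #include <unistd.h>
+
+    int main(void)
+    {
+            size_t len = 4 * 4096;
+            int fd = memfd_create("demo", 0);
+            char *p;
+
+            if (fd < 0 || ftruncate(fd, len))
+                    return 1;
+            p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+            if (p == MAP_FAILED || mlock(p, len))
+                    return 1;
+
+            /* Punching a hole forcibly unmaps (and so munlocks) the
+             * affected page, even though the VMA remains VM_LOCKED. */
+            fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+                      0, 4096);
+            return 0;
+    }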
 
 
 Page Reclaim in shrink_*_list()
 -------------------------------
 
-shrink_active_list() culls any obviously unevictable pages - i.e.
-!page_evictable(page) - diverting these to the unevictable list.
+vmscan's shrink_active_list() culls any obviously unevictable pages -
+i.e. !page_evictable(page) pages - diverting those to the unevictable list.
 However, shrink_active_list() only sees unevictable pages that made it onto the
-active/inactive lru lists.  Note that these pages do not have PageUnevictable
-set - otherwise they would be on the unevictable list and shrink_active_list
+active/inactive LRU lists.  Note that these pages do not have PageUnevictable
+set - otherwise they would be on the unevictable list and shrink_active_list()
 would never see them.
 
 Some examples of these unevictable pages on the LRU lists are:
@@ -586,20 +540,15 @@ Some examples of these unevictable pages on the LRU lists are:
      when an application accesses the page the first time after SHM_LOCK'ing
      the segment.
 
- (3) mlocked pages that could not be isolated from the LRU and moved to the
-     unevictable list in mlock_vma_page().
-
-shrink_inactive_list() also diverts any unevictable pages that it finds on the
-inactive lists to the appropriate node's unevictable list.
+ (3) pages still mapped into VM_LOCKED VMAs, which should be marked mlocked,
+     but events left mlock_count too low, so they were munlocked too early.
 
-shrink_inactive_list() should only see SHM_LOCK'd pages that became SHM_LOCK'd
-after shrink_active_list() had moved them to the inactive list, or pages mapped
-into VM_LOCKED VMAs that munlock_vma_page() couldn't isolate from the LRU to
-recheck via page_mlock().  shrink_inactive_list() won't notice the latter,
-but will pass on to shrink_page_list().
+vmscan's shrink_inactive_list() and shrink_page_list() also divert obviously
+unevictable pages found on the inactive lists to the appropriate memory cgroup
+and node unevictable list.
 
-shrink_page_list() again culls obviously unevictable pages that it could
-encounter for similar reason to shrink_inactive_list().  Pages mapped into
-VM_LOCKED VMAs but without PG_mlocked set will make it all the way to
-try_to_unmap().  shrink_page_list() will divert them to the unevictable list
-when try_to_unmap() returns SWAP_MLOCK, as discussed above.
+rmap's page_referenced_one(), called via vmscan's shrink_active_list() or
+shrink_page_list(), and rmap's try_to_unmap_one() called via shrink_page_list(),
+check for case (3) pages still mapped into VM_LOCKED VMAs, and call mlock_vma_page()
+to correct them.  Such pages are culled to the unevictable list when released
+by the shrinker.
index cf896438366bfd367a989d2cbb08aede261a9fb6..3c0f56b44c619d7d050df585e17b67ff4041ab7b 100644 (file)
@@ -201,6 +201,7 @@ F:  include/net/ieee80211_radiotap.h
 F:     include/net/iw_handler.h
 F:     include/net/wext.h
 F:     include/uapi/linux/nl80211.h
+F:     include/uapi/linux/wireless.h
 F:     net/wireless/
 
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
@@ -2636,7 +2637,7 @@ F:        sound/soc/rockchip/
 N:     rockchip
 
 ARM/SAMSUNG S3C, S5P AND EXYNOS ARM ARCHITECTURES
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 R:     Alim Akhtar <alim.akhtar@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org
@@ -3742,7 +3743,7 @@ F:        include/linux/platform_data/b53.h
 
 BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE
 M:     Nicolas Saenz Julienne <nsaenz@kernel.org>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -3757,7 +3758,7 @@ BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
 M:     Ray Jui <rjui@broadcom.com>
 M:     Scott Branden <sbranden@broadcom.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 S:     Maintained
 T:     git git://github.com/broadcom/mach-bcm
 F:     arch/arm/mach-bcm/
@@ -3777,7 +3778,7 @@ F:        arch/mips/include/asm/mach-bcm47xx/*
 
 BROADCOM BCM4908 ETHERNET DRIVER
 M:     Rafał Miłecki <rafal@milecki.pl>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml
@@ -3786,7 +3787,7 @@ F:        drivers/net/ethernet/broadcom/unimac.h
 
 BROADCOM BCM4908 PINMUX DRIVER
 M:     Rafał Miłecki <rafal@milecki.pl>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-gpio@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/pinctrl/brcm,bcm4908-pinctrl.yaml
@@ -3796,7 +3797,7 @@ BROADCOM BCM5301X ARM ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
 M:     Hauke Mehrtens <hauke@hauke-m.de>
 M:     Rafał Miłecki <zajec5@gmail.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/boot/dts/bcm470*
@@ -3807,7 +3808,7 @@ F:        arch/arm/mach-bcm/bcm_5301x.c
 BROADCOM BCM53573 ARM ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
 M:     Rafał Miłecki <rafal@milecki.pl>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/boot/dts/bcm47189*
@@ -3815,7 +3816,7 @@ F:        arch/arm/boot/dts/bcm53573*
 
 BROADCOM BCM63XX ARM ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://github.com/broadcom/stblinux.git
@@ -3829,7 +3830,7 @@ F:        drivers/usb/gadget/udc/bcm63xx_udc.*
 
 BROADCOM BCM7XXX ARM ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://github.com/broadcom/stblinux.git
@@ -3847,21 +3848,21 @@ N:      bcm7120
 BROADCOM BDC DRIVER
 M:     Al Cooper <alcooperx@gmail.com>
 L:     linux-usb@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 S:     Maintained
 F:     Documentation/devicetree/bindings/usb/brcm,bdc.yaml
 F:     drivers/usb/gadget/udc/bdc/
 
 BROADCOM BMIPS CPUFREQ DRIVER
 M:     Markus Mayer <mmayer@broadcom.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 F:     drivers/cpufreq/bmips-cpufreq.c
 
 BROADCOM BMIPS MIPS ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-mips@vger.kernel.org
 S:     Maintained
 T:     git git://github.com/broadcom/stblinux.git
@@ -3927,53 +3928,53 @@ F:      drivers/net/wireless/broadcom/brcm80211/
 BROADCOM BRCMSTB GPIO DRIVER
 M:     Doug Berger <opendmb@gmail.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 S:     Supported
 F:     Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.yaml
 F:     drivers/gpio/gpio-brcmstb.c
 
 BROADCOM BRCMSTB I2C DRIVER
 M:     Kamal Dasu <kdasu.kdev@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-i2c@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Supported
 F:     Documentation/devicetree/bindings/i2c/brcm,brcmstb-i2c.yaml
 F:     drivers/i2c/busses/i2c-brcmstb.c
 
 BROADCOM BRCMSTB UART DRIVER
 M:     Al Cooper <alcooperx@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-serial@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
 F:     drivers/tty/serial/8250/8250_bcm7271.c
 
 BROADCOM BRCMSTB USB EHCI DRIVER
 M:     Al Cooper <alcooperx@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-usb@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     Documentation/devicetree/bindings/usb/brcm,bcm7445-ehci.yaml
 F:     drivers/usb/host/ehci-brcm.*
 
 BROADCOM BRCMSTB USB PIN MAP DRIVER
 M:     Al Cooper <alcooperx@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-usb@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     Documentation/devicetree/bindings/usb/brcm,usb-pinmap.yaml
 F:     drivers/usb/misc/brcmstb-usb-pinmap.c
 
 BROADCOM BRCMSTB USB2 and USB3 PHY DRIVER
 M:     Al Cooper <alcooperx@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-kernel@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     drivers/phy/broadcom/phy-brcm-usb*
 
 BROADCOM ETHERNET PHY DRIVERS
 M:     Florian Fainelli <f.fainelli@gmail.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt
@@ -3984,7 +3985,7 @@ F:        include/linux/brcmphy.h
 BROADCOM GENET ETHERNET DRIVER
 M:     Doug Berger <opendmb@gmail.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     Documentation/devicetree/bindings/net/brcm,bcmgenet.yaml
@@ -3998,7 +3999,7 @@ F:        include/linux/platform_data/mdio-bcm-unimac.h
 BROADCOM IPROC ARM ARCHITECTURE
 M:     Ray Jui <rjui@broadcom.com>
 M:     Scott Branden <sbranden@broadcom.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://github.com/broadcom/stblinux.git
@@ -4026,7 +4027,7 @@ N:        stingray
 
 BROADCOM IPROC GBIT ETHERNET DRIVER
 M:     Rafał Miłecki <rafal@milecki.pl>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/brcm,amac.yaml
@@ -4035,7 +4036,7 @@ F:        drivers/net/ethernet/broadcom/unimac.h
 
 BROADCOM KONA GPIO DRIVER
 M:     Ray Jui <rjui@broadcom.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 S:     Supported
 F:     Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt
 F:     drivers/gpio/gpio-bcm-kona.c
@@ -4068,7 +4069,7 @@ F:        drivers/firmware/broadcom/*
 BROADCOM PMB (POWER MANAGEMENT BUS) DRIVER
 M:     Rafał Miłecki <rafal@milecki.pl>
 M:     Florian Fainelli <f.fainelli@gmail.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 T:     git git://github.com/broadcom/stblinux.git
@@ -4084,7 +4085,7 @@ F:        include/linux/bcma/
 
 BROADCOM SPI DRIVER
 M:     Kamal Dasu <kdasu.kdev@gmail.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 S:     Maintained
 F:     Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.yaml
 F:     drivers/spi/spi-bcm-qspi.*
@@ -4093,7 +4094,7 @@ F:        drivers/spi/spi-iproc-qspi.c
 
 BROADCOM STB AVS CPUFREQ DRIVER
 M:     Markus Mayer <mmayer@broadcom.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
@@ -4101,7 +4102,7 @@ F:        drivers/cpufreq/brcmstb*
 
 BROADCOM STB AVS TMON DRIVER
 M:     Markus Mayer <mmayer@broadcom.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml
@@ -4109,7 +4110,7 @@ F:        drivers/thermal/broadcom/brcmstb*
 
 BROADCOM STB DPFE DRIVER
 M:     Markus Mayer <mmayer@broadcom.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     Documentation/devicetree/bindings/memory-controllers/brcm,dpfe-cpu.yaml
@@ -4118,8 +4119,8 @@ F:        drivers/memory/brcmstb_dpfe.c
 BROADCOM STB NAND FLASH DRIVER
 M:     Brian Norris <computersforpeace@gmail.com>
 M:     Kamal Dasu <kdasu.kdev@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-mtd@lists.infradead.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     drivers/mtd/nand/raw/brcmnand/
 F:     include/linux/platform_data/brcmnand.h
@@ -4128,7 +4129,7 @@ BROADCOM STB PCIE DRIVER
 M:     Jim Quinlan <jim2101024@gmail.com>
 M:     Nicolas Saenz Julienne <nsaenz@kernel.org>
 M:     Florian Fainelli <f.fainelli@gmail.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-pci@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
@@ -4136,7 +4137,7 @@ F:        drivers/pci/controller/pcie-brcmstb.c
 
 BROADCOM SYSTEMPORT ETHERNET DRIVER
 M:     Florian Fainelli <f.fainelli@gmail.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/broadcom/bcmsysport.*
@@ -4153,7 +4154,7 @@ F:        drivers/net/ethernet/broadcom/tg3.*
 
 BROADCOM VK DRIVER
 M:     Scott Branden <scott.branden@broadcom.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 S:     Supported
 F:     drivers/misc/bcm-vk/
 F:     include/uapi/linux/misc/bcm_vk.h
@@ -4640,6 +4641,7 @@ F:        drivers/input/touchscreen/chipone_icn8505.c
 
 CHROME HARDWARE PLATFORM SUPPORT
 M:     Benson Leung <bleung@chromium.org>
+L:     chrome-platform@lists.linux.dev
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/chrome-platform/linux.git
 F:     drivers/platform/chrome/
@@ -4648,6 +4650,7 @@ CHROMEOS EC CODEC DRIVER
 M:     Cheng-Yi Chiang <cychiang@chromium.org>
 M:     Tzung-Bi Shih <tzungbi@google.com>
 R:     Guenter Roeck <groeck@chromium.org>
+L:     chrome-platform@lists.linux.dev
 S:     Maintained
 F:     Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
 F:     sound/soc/codecs/cros_ec_codec.*
@@ -4655,6 +4658,7 @@ F:        sound/soc/codecs/cros_ec_codec.*
 CHROMEOS EC SUBDRIVERS
 M:     Benson Leung <bleung@chromium.org>
 R:     Guenter Roeck <groeck@chromium.org>
+L:     chrome-platform@lists.linux.dev
 S:     Maintained
 F:     drivers/power/supply/cros_usbpd-charger.c
 N:     cros_ec
@@ -4662,11 +4666,13 @@ N:      cros-ec
 
 CHROMEOS EC USB TYPE-C DRIVER
 M:     Prashant Malani <pmalani@chromium.org>
+L:     chrome-platform@lists.linux.dev
 S:     Maintained
 F:     drivers/platform/chrome/cros_ec_typec.c
 
 CHROMEOS EC USB PD NOTIFY DRIVER
 M:     Prashant Malani <pmalani@chromium.org>
+L:     chrome-platform@lists.linux.dev
 S:     Maintained
 F:     drivers/platform/chrome/cros_usbpd_notify.c
 F:     include/linux/platform_data/cros_usbpd_notify.h
@@ -4786,6 +4792,7 @@ F:        .clang-format
 CLANG/LLVM BUILD SUPPORT
 M:     Nathan Chancellor <nathan@kernel.org>
 M:     Nick Desaulniers <ndesaulniers@google.com>
+R:     Tom Rix <trix@redhat.com>
 L:     llvm@lists.linux.dev
 S:     Supported
 W:     https://clangbuiltlinux.github.io/
@@ -5157,6 +5164,20 @@ S:       Supported
 F:     drivers/cpuidle/cpuidle-psci.h
 F:     drivers/cpuidle/cpuidle-psci-domain.c
 
+CPUIDLE DRIVER - DT IDLE PM DOMAIN
+M:     Ulf Hansson <ulf.hansson@linaro.org>
+L:     linux-pm@vger.kernel.org
+S:     Supported
+F:     drivers/cpuidle/dt_idle_genpd.c
+F:     drivers/cpuidle/dt_idle_genpd.h
+
+CPUIDLE DRIVER - RISC-V SBI
+M:     Anup Patel <anup@brainfault.org>
+L:     linux-pm@vger.kernel.org
+L:     linux-riscv@lists.infradead.org
+S:     Maintained
+F:     drivers/cpuidle/cpuidle-riscv-sbi.c
+
 CRAMFS FILESYSTEM
 M:     Nicolas Pitre <nico@fluxnic.net>
 S:     Maintained
@@ -5696,7 +5717,7 @@ W:        http://lanana.org/docs/device-list/index.html
 
 DEVICE RESOURCE MANAGEMENT HELPERS
 M:     Hans de Goede <hdegoede@redhat.com>
-R:     Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+R:     Matti Vaittinen <mazziesaccount@gmail.com>
 S:     Maintained
 F:     include/linux/devm-helpers.h
 
@@ -6038,6 +6059,7 @@ F:        drivers/scsi/dpt/
 DRBD DRIVER
 M:     Philipp Reisner <philipp.reisner@linbit.com>
 M:     Lars Ellenberg <lars.ellenberg@linbit.com>
+M:     Christoph Böhmwalder <christoph.boehmwalder@linbit.com>
 L:     drbd-dev@lists.linbit.com
 S:     Supported
 W:     http://www.drbd.org
@@ -8655,7 +8677,6 @@ F:        include/linux/cciss*.h
 F:     include/uapi/linux/cciss*.h
 
 HFI1 DRIVER
-M:     Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
 M:     Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
@@ -9316,14 +9337,12 @@ F:      drivers/pci/hotplug/rpaphp*
 
 IBM Power SRIOV Virtual NIC Device Driver
 M:     Dany Madden <drt@linux.ibm.com>
-M:     Sukadev Bhattiprolu <sukadev@linux.ibm.com>
 R:     Thomas Falcon <tlfalcon@linux.ibm.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/ibm/ibmvnic.*
 
 IBM Power Virtual Accelerator Switchboard
-M:     Sukadev Bhattiprolu <sukadev@linux.ibm.com>
 L:     linuxppc-dev@lists.ozlabs.org
 S:     Supported
 F:     arch/powerpc/include/asm/vas.h
@@ -9517,6 +9536,12 @@ M:       Stanislaw Gruszka <stf_xl@wp.pl>
 S:     Maintained
 F:     drivers/usb/atm/ueagle-atm.c
 
+IMAGIS TOUCHSCREEN DRIVER
+M:     Markuss Broks <markuss.broks@gmail.com>
+S:     Maintained
+F:     Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml
+F:     drivers/input/touchscreen/imagis.c
+
 IMGTEC ASCII LCD DRIVER
 M:     Paul Burton <paulburton@kernel.org>
 S:     Maintained
@@ -9572,6 +9597,7 @@ F:        drivers/iio/pressure/dps310.c
 
 INFINIBAND SUBSYSTEM
 M:     Jason Gunthorpe <jgg@nvidia.com>
+M:     Leon Romanovsky <leonro@nvidia.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 W:     https://github.com/linux-rdma/rdma-core
@@ -10343,6 +10369,7 @@ F:      include/linux/isapnp.h
 ISCSI
 M:     Lee Duncan <lduncan@suse.com>
 M:     Chris Leech <cleech@redhat.com>
+M:     Mike Christie <michael.christie@oracle.com>
 L:     open-iscsi@googlegroups.com
 L:     linux-scsi@vger.kernel.org
 S:     Maintained
@@ -10648,9 +10675,9 @@ F:      tools/testing/selftests/
 
 KERNEL SMB3 SERVER (KSMBD)
 M:     Namjae Jeon <linkinjeon@kernel.org>
-M:     Sergey Senozhatsky <senozhatsky@chromium.org>
 M:     Steve French <sfrench@samba.org>
 M:     Hyunchul Lee <hyc.lee@gmail.com>
+R:     Sergey Senozhatsky <senozhatsky@chromium.org>
 L:     linux-cifs@vger.kernel.org
 S:     Maintained
 T:     git git://git.samba.org/ksmbd.git
@@ -11182,7 +11209,7 @@ F:      scripts/spdxcheck.py
 
 LINEAR RANGES HELPERS
 M:     Mark Brown <broonie@kernel.org>
-R:     Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+R:     Matti Vaittinen <mazziesaccount@gmail.com>
 F:     lib/linear_ranges.c
 F:     lib/test_linear_ranges.c
 F:     include/linux/linear_range.h
@@ -11879,7 +11906,7 @@ F:      drivers/iio/proximity/mb1232.c
 
 MAXIM MAX17040 FAMILY FUEL GAUGE DRIVERS
 R:     Iskren Chernev <iskren.chernev@gmail.com>
-R:     Krzysztof Kozlowski <krzk@kernel.org>
+R:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 R:     Marek Szyprowski <m.szyprowski@samsung.com>
 R:     Matheus Castello <matheus@castello.eng.br>
 L:     linux-pm@vger.kernel.org
@@ -11889,7 +11916,7 @@ F:      drivers/power/supply/max17040_battery.c
 
 MAXIM MAX17042 FAMILY FUEL GAUGE DRIVERS
 R:     Hans de Goede <hdegoede@redhat.com>
-R:     Krzysztof Kozlowski <krzk@kernel.org>
+R:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 R:     Marek Szyprowski <m.szyprowski@samsung.com>
 R:     Sebastian Krzyszkowiak <sebastian.krzyszkowiak@puri.sm>
 R:     Purism Kernel Team <kernel@puri.sm>
@@ -11941,7 +11968,7 @@ F:      Documentation/devicetree/bindings/power/supply/maxim,max77976.yaml
 F:     drivers/power/supply/max77976_charger.c
 
 MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-pm@vger.kernel.org
 S:     Supported
@@ -11952,7 +11979,7 @@ F:      drivers/power/supply/max77693_charger.c
 
 MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
 M:     Chanwoo Choi <cw00.choi@samsung.com>
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-kernel@vger.kernel.org
 S:     Supported
@@ -12375,7 +12402,7 @@ F:      drivers/mmc/host/mtk-sd.c
 
 MEDIATEK MT76 WIRELESS LAN DRIVER
 M:     Felix Fietkau <nbd@nbd.name>
-M:     Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+M:     Lorenzo Bianconi <lorenzo@kernel.org>
 M:     Ryder Lee <ryder.lee@mediatek.com>
 R:     Shayne Chen <shayne.chen@mediatek.com>
 R:     Sean Wang <sean.wang@mediatek.com>
@@ -12646,7 +12673,7 @@ F:      mm/memblock.c
 F:     tools/testing/memblock/
 
 MEMORY CONTROLLER DRIVERS
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux-mem-ctrl.git
@@ -13597,6 +13624,7 @@ F:      net/core/drop_monitor.c
 
 NETWORKING DRIVERS
 M:     "David S. Miller" <davem@davemloft.net>
+M:     Eric Dumazet <edumazet@google.com>
 M:     Jakub Kicinski <kuba@kernel.org>
 M:     Paolo Abeni <pabeni@redhat.com>
 L:     netdev@vger.kernel.org
@@ -13644,6 +13672,7 @@ F:      tools/testing/selftests/drivers/net/dsa/
 
 NETWORKING [GENERAL]
 M:     "David S. Miller" <davem@davemloft.net>
+M:     Eric Dumazet <edumazet@google.com>
 M:     Jakub Kicinski <kuba@kernel.org>
 M:     Paolo Abeni <pabeni@redhat.com>
 L:     netdev@vger.kernel.org
@@ -13790,10 +13819,11 @@ F:    include/uapi/linux/nexthop.h
 F:     net/ipv4/nexthop.c
 
 NFC SUBSYSTEM
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:     linux-nfc@lists.01.org (subscribers-only)
 L:     netdev@vger.kernel.org
 S:     Maintained
+B:     mailto:linux-nfc@lists.01.org
 F:     Documentation/devicetree/bindings/net/nfc/
 F:     drivers/nfc/
 F:     include/linux/platform_data/nfcmrvl.h
@@ -14107,7 +14137,7 @@ F:      Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
 F:     drivers/regulator/pf8x00-regulator.c
 
 NXP PTN5150A CC LOGIC AND EXTCON DRIVER
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
@@ -14622,9 +14652,14 @@ L:     op-tee@lists.trustedfirmware.org
 S:     Maintained
 F:     drivers/char/hw_random/optee-rng.c
 
+OP-TEE RTC DRIVER
+M:     Clément Léger <clement.leger@bootlin.com>
+L:     linux-rtc@vger.kernel.org
+S:     Maintained
+F:     drivers/rtc/rtc-optee.c
+
 OPA-VNIC DRIVER
 M:     Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
-M:     Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 F:     drivers/infiniband/ulp/opa_vnic
@@ -14655,7 +14690,7 @@ F:      scripts/dtc/
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
 M:     Rob Herring <robh+dt@kernel.org>
-M:     Krzysztof Kozlowski <krzk+dt@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski+dt@linaro.org>
 L:     devicetree@vger.kernel.org
 S:     Maintained
 C:     irc://irc.libera.chat/devicetree
@@ -15567,7 +15602,7 @@ F:      drivers/pinctrl/renesas/
 
 PIN CONTROLLER - SAMSUNG
 M:     Tomasz Figa <tomasz.figa@gmail.com>
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 R:     Alim Akhtar <alim.akhtar@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -16066,7 +16101,6 @@ F:      include/uapi/linux/qemu_fw_cfg.h
 
 QIB DRIVER
 M:     Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
-M:     Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 F:     drivers/infiniband/hw/qib/
@@ -16584,7 +16618,6 @@ F:      drivers/net/ethernet/rdc/r6040.c
 
 RDMAVT - RDMA verbs software
 M:     Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
-M:     Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 F:     drivers/infiniband/sw/rdmavt
@@ -16979,8 +17012,7 @@ S:      Odd Fixes
 F:     drivers/tty/serial/rp2.*
 
 ROHM BD99954 CHARGER IC
-R:     Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
-L:     linux-power@fi.rohmeurope.com
+R:     Matti Vaittinen <mazziesaccount@gmail.com>
 S:     Supported
 F:     drivers/power/supply/bd99954-charger.c
 F:     drivers/power/supply/bd99954-charger.h
@@ -17003,8 +17035,7 @@ F:      drivers/regulator/bd9571mwv-regulator.c
 F:     include/linux/mfd/bd9571mwv.h
 
 ROHM POWER MANAGEMENT IC DEVICE DRIVERS
-R:     Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
-L:     linux-power@fi.rohmeurope.com
+R:     Matti Vaittinen <mazziesaccount@gmail.com>
 S:     Supported
 F:     drivers/clk/clk-bd718x7.c
 F:     drivers/gpio/gpio-bd71815.c
@@ -17246,7 +17277,7 @@ W:      http://www.ibm.com/developerworks/linux/linux390/
 F:     drivers/s390/scsi/zfcp_*
 
 S3C ADC BATTERY DRIVER
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:     linux-samsung-soc@vger.kernel.org
 S:     Odd Fixes
 F:     drivers/power/supply/s3c_adc_battery.c
@@ -17291,7 +17322,7 @@ F:      Documentation/admin-guide/LSM/SafeSetID.rst
 F:     security/safesetid/
 
 SAMSUNG AUDIO (ASoC) DRIVERS
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Supported
@@ -17299,7 +17330,7 @@ F:      Documentation/devicetree/bindings/sound/samsung*
 F:     sound/soc/samsung/
 
 SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:     linux-crypto@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
 S:     Maintained
@@ -17334,7 +17365,7 @@ S:      Maintained
 F:     drivers/platform/x86/samsung-laptop.c
 
 SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-kernel@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
@@ -17360,7 +17391,7 @@ F:      drivers/media/platform/samsung/s3c-camif/
 F:     include/media/drv-intf/s3c_camif.h
 
 SAMSUNG S3FWRN5 NFC DRIVER
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Krzysztof Opasiak <k.opasiak@samsung.com>
 L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Maintained
@@ -17382,7 +17413,7 @@ S:      Supported
 F:     drivers/media/i2c/s5k5baf.c
 
 SAMSUNG S5P Security SubSystem (SSS) DRIVER
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Vladimir Zapolskiy <vz@mleia.com>
 L:     linux-crypto@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
@@ -17417,7 +17448,7 @@ F:      include/linux/clk/samsung.h
 F:     include/linux/platform_data/clk-s3c2410.h
 
 SAMSUNG SPI DRIVERS
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Andi Shyti <andi@etezian.org>
 L:     linux-spi@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
@@ -17435,7 +17466,7 @@ F:      drivers/net/ethernet/samsung/sxgbe/
 
 SAMSUNG THERMAL DRIVER
 M:     Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:     linux-pm@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
 S:     Maintained
@@ -17619,8 +17650,8 @@ K:      \bTIF_SECCOMP\b
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) Broadcom BRCMSTB DRIVER
 M:     Al Cooper <alcooperx@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-mmc@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     drivers/mmc/host/sdhci-brcmstb*
 
@@ -20539,14 +20570,15 @@ F:    Documentation/admin-guide/media/zr364xx*
 F:     drivers/media/usb/zr364xx/
 
 USER-MODE LINUX (UML)
-M:     Jeff Dike <jdike@addtoit.com>
 M:     Richard Weinberger <richard@nod.at>
 M:     Anton Ivanov <anton.ivanov@cambridgegreys.com>
+M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linux-um@lists.infradead.org
 S:     Maintained
 W:     http://user-mode-linux.sourceforge.net
 Q:     https://patchwork.ozlabs.org/project/linux-um/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/uml/linux.git next
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/uml/linux.git fixes
 F:     Documentation/virt/uml/
 F:     arch/um/
 F:     arch/x86/um/
@@ -21085,7 +21117,7 @@ F:      include/linux/regulator/
 K:     regulator_get_optional
 
 VOLTAGE AND CURRENT REGULATOR IRQ HELPERS
-R:     Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+R:     Matti Vaittinen <mazziesaccount@gmail.com>
 F:     drivers/regulator/irq_helpers.c
 
 VRF
@@ -21198,10 +21230,8 @@ S:     Maintained
 F:     drivers/hid/hid-wiimote*
 
 WILOCITY WIL6210 WIRELESS DRIVER
-M:     Maya Erez <merez@codeaurora.org>
 L:     linux-wireless@vger.kernel.org
-L:     wil6210@qti.qualcomm.com
-S:     Supported
+S:     Orphan
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/wil6210
 F:     drivers/net/wireless/ath/wil6210/
 
index c28c5d91e5c884b01117cfcfac5ffd2e0564b9f0..fa5112a0ec1b00db2feab34075879205476b26b7 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
-PATCHLEVEL = 17
+PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc3
 NAME = Superb Owl
 
 # *DOCUMENTATION*
@@ -424,19 +424,26 @@ HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
 HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
 
 ifneq ($(LLVM),)
-HOSTCC = clang
-HOSTCXX        = clang++
+ifneq ($(filter %/,$(LLVM)),)
+LLVM_PREFIX := $(LLVM)
+else ifneq ($(filter -%,$(LLVM)),)
+LLVM_SUFFIX := $(LLVM)
+endif
+
+HOSTCC = $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
+HOSTCXX        = $(LLVM_PREFIX)clang++$(LLVM_SUFFIX)
 else
 HOSTCC = gcc
 HOSTCXX        = g++
 endif
 
-export KBUILD_USERCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
-                             -O2 -fomit-frame-pointer -std=gnu11 \
-                             -Wdeclaration-after-statement
-export KBUILD_USERLDFLAGS :=
+KBUILD_USERHOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
+                        -O2 -fomit-frame-pointer -std=gnu11 \
+                        -Wdeclaration-after-statement
+KBUILD_USERCFLAGS  := $(KBUILD_USERHOSTCFLAGS) $(USERCFLAGS)
+KBUILD_USERLDFLAGS := $(USERLDFLAGS)
 
-KBUILD_HOSTCFLAGS   := $(KBUILD_USERCFLAGS) $(HOST_LFS_CFLAGS) $(HOSTCFLAGS)
+KBUILD_HOSTCFLAGS   := $(KBUILD_USERHOSTCFLAGS) $(HOST_LFS_CFLAGS) $(HOSTCFLAGS)
 KBUILD_HOSTCXXFLAGS := -Wall -O2 $(HOST_LFS_CFLAGS) $(HOSTCXXFLAGS)
 KBUILD_HOSTLDFLAGS  := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS)
 KBUILD_HOSTLDLIBS   := $(HOST_LFS_LIBS) $(HOSTLDLIBS)
@@ -444,14 +451,14 @@ KBUILD_HOSTLDLIBS   := $(HOST_LFS_LIBS) $(HOSTLDLIBS)
 # Make variables (CC, etc...)
 CPP            = $(CC) -E
 ifneq ($(LLVM),)
-CC             = clang
-LD             = ld.lld
-AR             = llvm-ar
-NM             = llvm-nm
-OBJCOPY                = llvm-objcopy
-OBJDUMP                = llvm-objdump
-READELF                = llvm-readelf
-STRIP          = llvm-strip
+CC             = $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
+LD             = $(LLVM_PREFIX)ld.lld$(LLVM_SUFFIX)
+AR             = $(LLVM_PREFIX)llvm-ar$(LLVM_SUFFIX)
+NM             = $(LLVM_PREFIX)llvm-nm$(LLVM_SUFFIX)
+OBJCOPY                = $(LLVM_PREFIX)llvm-objcopy$(LLVM_SUFFIX)
+OBJDUMP                = $(LLVM_PREFIX)llvm-objdump$(LLVM_SUFFIX)
+READELF                = $(LLVM_PREFIX)llvm-readelf$(LLVM_SUFFIX)
+STRIP          = $(LLVM_PREFIX)llvm-strip$(LLVM_SUFFIX)
 else
 CC             = $(CROSS_COMPILE)gcc
 LD             = $(CROSS_COMPILE)ld
@@ -531,6 +538,7 @@ export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AW
 export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
 export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
 export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
+export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS
 
 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
 export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
@@ -785,10 +793,6 @@ ifdef CONFIG_CC_IS_CLANG
 KBUILD_CPPFLAGS += -Qunused-arguments
 # The kernel builds with '-std=gnu11' so use of GNU extensions is acceptable.
 KBUILD_CFLAGS += -Wno-gnu
-# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
-# source of a reference will be _MergedGlobals and not on of the whitelisted names.
-# See modpost pattern 2
-KBUILD_CFLAGS += -mno-global-merge
 else
 
 # gcc inanely warns about local variables called 'main'
@@ -1237,8 +1241,8 @@ define filechk_version.h
        echo \#define LINUX_VERSION_SUBLEVEL $(SUBLEVEL)
 endef
 
-$(version_h): PATCHLEVEL := $(if $(PATCHLEVEL), $(PATCHLEVEL), 0)
-$(version_h): SUBLEVEL := $(if $(SUBLEVEL), $(SUBLEVEL), 0)
+$(version_h): PATCHLEVEL := $(or $(PATCHLEVEL), 0)
+$(version_h): SUBLEVEL := $(or $(SUBLEVEL), 0)
 $(version_h): FORCE
        $(call filechk,version.h)
 
@@ -1621,7 +1625,7 @@ help:
        @$(MAKE) -f $(srctree)/Documentation/Makefile dochelp
        @echo  ''
        @echo  'Architecture specific targets ($(SRCARCH)):'
-       @$(if $(archhelp),$(archhelp),\
+       @$(or $(archhelp),\
                echo '  No architecture specific help defined for $(SRCARCH)')
        @echo  ''
        @$(if $(boards), \
@@ -1838,7 +1842,7 @@ $(clean-dirs):
 
 clean: $(clean-dirs)
        $(call cmd,rmfiles)
-       @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
+       @find $(or $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
                \( -name '*.[aios]' -o -name '*.ko' -o -name '.*.cmd' \
                -o -name '*.ko.*' \
                -o -name '*.dtb' -o -name '*.dtbo' -o -name '*.dtb.S' -o -name '*.dt.yaml' \
index 29b0167c088b8af08b7fd53f08b32f4d15296e76..31c4fdc4a4baaa7ad84063c98c30e01d77de349b 100644 (file)
@@ -854,10 +854,8 @@ config HAVE_ARCH_HUGE_VMAP
 
 #
 #  Archs that select this would be capable of PMD-sized vmaps (i.e.,
-#  arch_vmap_pmd_supported() returns true), and they must make no assumptions
-#  that vmalloc memory is mapped with PAGE_SIZE ptes. The VM_NO_HUGE_VMAP flag
-#  can be used to prohibit arch-specific allocations from using hugepages to
-#  help with this (e.g., modules may require it).
+#  arch_vmap_pmd_supported() returns true). The VM_ALLOW_HUGE_VMAP flag
+#  must be used to enable allocations to use hugepages.
 #
 config HAVE_ARCH_HUGE_VMALLOC
        depends on HAVE_ARCH_HUGE_VMAP
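The help-text rewrite above records a semantic flip: huge vmalloc mappings are now opt-in per allocation via VM_ALLOW_HUGE_VMAP rather than opt-out via the removed VM_NO_HUGE_VMAP. A minimal sketch of an opted-in caller, assuming the __vmalloc_node_range() interface of this kernel generation (the size, alignment and flags are illustrative, not taken from this diff):

	/* Opt this one allocation in to PMD-sized mappings; everything
	 * else keeps PAGE_SIZE ptes by default under the new model. */
	void *buf = __vmalloc_node_range(SZ_16M, PMD_SIZE,
					 VMALLOC_START, VMALLOC_END,
					 GFP_KERNEL, PAGE_KERNEL,
					 VM_ALLOW_HUGE_VMAP, NUMA_NO_NODE,
					 __builtin_return_address(0));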
index 3df37492c7b70688b17c44f7bc408cbb48c9be7f..c9f525a6aab8099023710ffe7ec57c0f2b737f48 100644 (file)
@@ -45,10 +45,4 @@ struct user {
        char            u_comm[32];             /* user command name */
 };
 
-#define NBPG                   PAGE_SIZE
-#define UPAGES                 1
-#define HOST_TEXT_START_ADDR   (u.start_code)
-#define HOST_DATA_START_ADDR   (u.start_data)
-#define HOST_STACK_END_ADDR    (u.start_stack + u.u_ssize * NBPG)
-
 #endif /* _ALPHA_USER_H */
index 6713c65a25e15cfe96824cbcdba6bb8f222ef274..b265e4bc16c2e0bc52c63819fda704ebda00de31 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 187a187706cbeb2b5d2339611b5c0a71420221ff..41bcbb460fac459c70261a2cb384b13c56ef9058 100644 (file)
@@ -92,6 +92,8 @@ ifeq ($(CONFIG_USE_OF),y)
 OBJS   += $(libfdt_objs) fdt_check_mem_start.o
 endif
 
+OBJS   += lib1funcs.o ashldi3.o bswapsdi2.o
+
 targets       := vmlinux vmlinux.lds piggy_data piggy.o \
                 head.o $(OBJS)
 
@@ -126,8 +128,6 @@ endif
 # Next argument is a linker script
 LDFLAGS_vmlinux += -T
 
-OBJS   += lib1funcs.o ashldi3.o bswapsdi2.o
-
 # We need to prevent any GOTOFF relocs being used with references
 # to symbols in the .bss section since we cannot relocate them
 # independently from the rest at run time.  This can be achieved by
index 2799b2a1f4d25856eda252b4a64d7396448d4059..f7d90cf1bb772c934b2f93d10e36fd32e05e72ef 100644 (file)
                pinctrl_pio_io_reset: gpio_io_reset {
                        pinmux = <PIN_PB30__GPIO>;
                        bias-disable;
-                       drive-open-drain = <1>;
+                       drive-open-drain;
                        output-low;
                };
                pinctrl_pio_input: gpio_input {
index abe27adfa4d65ac050b4fa9fd72c7fe3651f18cf..46566462841990483372f5af1c394601a63a123b 100644 (file)
                pinmux = <PIN_PD12__FLEXCOM4_IO0>, //DATA
                <PIN_PD13__FLEXCOM4_IO1>; //CLK
                bias-disable;
-               drive-open-drain = <1>;
+               drive-open-drain;
        };
 
        pinctrl_pwm0 {
index 1e2a28c2f365d39a685f4524bcf0feced1dd431c..2fb51b9aca2ae3f78a7c21a5c962421895e78db8 100644 (file)
                nand0: nand@40000000 {
                        nand-bus-width = <8>;
                        nand-ecc-mode = "soft";
-                       nand-on-flash-bbt = <1>;
+                       nand-on-flash-bbt;
                        status = "okay";
                };
 
index 87c517d65f62fee6b439c8d32fcd9361e52a3f3e..e9aecac4f5b5b462d2c077d34d377bf1fa1437b1 100644 (file)
        status = "okay";
        pinctrl-names = "default";
        pinctrl-0 = <&spi1_pins &spi1_cs0_pin>;
-       flash: m25p80@0 {
+       flash: flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
                compatible = "jedec,spi-nor";
index 5126e2d72ed7893bb76ac9eb807aba84b21c6443..778796c10af86d2e9a5bd841426db0e68fb6396a 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&mcspi1_pins>;
 
-       m25p80@0 {
+       flash@0 {
                compatible = "w25x32";
                spi-max-frequency = <48000000>;
                reg = <0>;
index 097ec35c62d808c74298ecedebc82ee54da79c2e..0d58da1c0cc51ae417f443d05473c08c77520d2a 100644 (file)
@@ -26,7 +26,7 @@
                                pinctrl-0 = <&mmc0_4bit_pins_a
                                             &mmc0_sck_cfg
                                             &en_sd_pwr>;
-                               broken-cd = <1>;
+                               broken-cd;
                                bus-width = <4>;
                                vmmc-supply = <&reg_vddio_sd0>;
                                status = "okay";
index 563bf9d44fe0d0fc6c1c18d133fe75feb79263fe..0b90c3f59f8987a0521a7a97246b25cecc2cd12a 100644 (file)
                regulators {
                        bcore1 {
                                regulator-name = "bcore1";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        bcore2 {
                                regulator-name = "bcore2";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        bpro {
                                regulator-name = "bpro";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        bperi {
                                regulator-name = "bperi";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        bmem {
                                regulator-name = "bmem";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo2 {
                                regulator-name = "ldo2";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <1800000>;
                        };
 
                        ldo3 {
                                regulator-name = "ldo3";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo4 {
                                regulator-name = "ldo4";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo5 {
                                regulator-name = "ldo5";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo6 {
                                regulator-name = "ldo6";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo7 {
                                regulator-name = "ldo7";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo8 {
                                regulator-name = "ldo8";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo9 {
                                regulator-name = "ldo9";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo10 {
                                regulator-name = "ldo10";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo11 {
                                regulator-name = "ldo11";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        bio {
                                regulator-name = "bio";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <1800000>;
                        };
index 7cda6944501d94db8900d62a2413b6b610902a5f..205e4d4627028c3ec5e71851799f740fb2b36c8e 100644 (file)
@@ -72,8 +72,8 @@
                        st,settling = <2>;
                        st,fraction-z = <7>;
                        st,i-drive = <1>;
-                       touchscreen-inverted-x = <1>;
-                       touchscreen-inverted-y = <1>;
+                       touchscreen-inverted-x;
+                       touchscreen-inverted-y;
                };
        };
 };
index b4664ab002566b5f1173cdeeebc1de78ecfc98ba..d3da8b1b473b8ee6aa9cac0882eef809bc8c2b26 100644 (file)
                gpmc,device-width = <2>;
                gpmc,wait-pin = <0>;
                gpmc,burst-length = <4>;
-               gpmc,cycle2cycle-samecsen = <1>;
-               gpmc,cycle2cycle-diffcsen = <1>;
+               gpmc,cycle2cycle-samecsen;
+               gpmc,cycle2cycle-diffcsen;
                gpmc,cs-on-ns = <0>;
                gpmc,cs-rd-off-ns = <45>;
                gpmc,cs-wr-off-ns = <45>;
index cbe42c4153a0fd51f55e3df254c1e932249713bb..b4d286a6fab1c59c2f5c7a8909cee00c5ee427ab 100644 (file)
@@ -76,7 +76,7 @@
                pinconf {
                        pins = "gpio20", "gpio21";
                        drive-strength = <2>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
                pinconf {
                        pins = "gpio24", "gpio25";
                        drive-strength = <2>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
                pinconf {
                        pins = "gpio8", "gpio9";
                        drive-strength = <2>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
                pinconf {
                        pins = "gpio12", "gpio13";
                        drive-strength = <2>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
                pinconf {
                        pins = "gpio16", "gpio17";
                        drive-strength = <2>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
                pinconf {
                        pins = "gpio84", "gpio85";
                        drive-strength = <2>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
index 996f4458d9fc531a6d36b712ad969322bceaf7a6..8cb04aa8ed2fe9ab0f2cc122cdc72f6c3f484169 100644 (file)
 
                        snps,axi-config = <&stmmac_axi_setup>;
                        snps,pbl = <32>;
-                       snps,aal = <1>;
+                       snps,aal;
 
                        qcom,nss-common = <&nss_common>;
                        qcom,qsgmii-csr = <&qsgmii_csr>;
 
                        snps,axi-config = <&stmmac_axi_setup>;
                        snps,pbl = <32>;
-                       snps,aal = <1>;
+                       snps,aal;
 
                        qcom,nss-common = <&nss_common>;
                        qcom,qsgmii-csr = <&qsgmii_csr>;
 
                        snps,axi-config = <&stmmac_axi_setup>;
                        snps,pbl = <32>;
-                       snps,aal = <1>;
+                       snps,aal;
 
                        qcom,nss-common = <&nss_common>;
                        qcom,qsgmii-csr = <&qsgmii_csr>;
 
                        snps,axi-config = <&stmmac_axi_setup>;
                        snps,pbl = <32>;
-                       snps,aal = <1>;
+                       snps,aal;
 
                        qcom,nss-common = <&nss_common>;
                        qcom,qsgmii-csr = <&qsgmii_csr>;
index 4cbadcb410841d1bce8ebfd4558d08bbe73e83db..ddd1cf4d05543e67e61aa91db1a07fba5dcac4d9 100644 (file)
                                        };
                                };
 
-                               m25p80@1 {
+                               flash@1 {
                                        compatible = "st,m25p80";
                                        reg = <1>;
                                        spi-max-frequency = <12000000>;
index fd194ebeedc92c02ce558836ad51f62415d5e5bc..3a51a41eb5e4d4ffebfae79f604817dfb0b65fb3 100644 (file)
                                cs-gpios = <&gpiopinctrl 80 0>, <&gpiopinctrl 24 0>,
                                           <&gpiopinctrl 85 0>;
 
-                               m25p80@0 {
+                               flash@0 {
                                        compatible = "m25p80";
                                        reg = <0>;
                                        spi-max-frequency = <12000000>;
index 827e887afbda42b4c15e175fcf6b196cc55f1576..13e1bdb3ddbf1190b2a16258b078470059c4f7a3 100644 (file)
                                reg = <0xb4100000 0x1000>;
                                interrupts = <0 105 0x4>;
                                status = "disabled";
-                               dmas = <&dwdma0 12 0 1>,
-                                       <&dwdma0 13 1 0>;
-                               dma-names = "tx", "rx";
+                               dmas = <&dwdma0 13 0 1>,
+                                       <&dwdma0 12 1 0>;
+                               dma-names = "rx", "tx";
                        };
 
                        thermal@e07008c4 {
index c87b881b2c8bb244fc43cdc37c97c6bd0f7657eb..9135533676879e8abb0d24aeb9a6c14b4701261d 100644 (file)
                                #size-cells = <0>;
                                interrupts = <0 31 0x4>;
                                status = "disabled";
-                               dmas = <&dwdma0 4 0 0>,
-                                       <&dwdma0 5 0 0>;
-                               dma-names = "tx", "rx";
+                               dmas = <&dwdma0 5 0 0>,
+                                       <&dwdma0 4 0 0>;
+                               dma-names = "rx", "tx";
                        };
 
                        rtc@e0580000 {
index 33ae5e0590df6196e75f25cfd10f9d62c31f39b8..ac53ee3c496b283d8373187ba5fb31a61620cff8 100644 (file)
        #size-cells = <0>;
        status = "okay";
 
-       flash0: is25lp016d@0 {
+       flash0: flash@0 {
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <133000000>;
index e222d2d2cb4496df351d862281583da3db74517d..d142dd30e16b32cfe50f2c4e76096de9703ea5a2 100644 (file)
        #size-cells = <0>;
        status = "okay";
 
-       flash0: mx66l51235l@0 {
+       flash0: flash@0 {
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-rx-bus-width = <4>;
                #size-cells = <1>;
        };
 
-       flash1: mx66l51235l@1 {
+       flash1: flash@1 {
                compatible = "jedec,spi-nor";
                reg = <1>;
                spi-rx-bus-width = <4>;
index a7acfee11ffcb63f0df4f80ed4a0b175946ee778..a80bc8a43091db668b8743b726ff024f1a07b96c 100644 (file)
@@ -49,11 +49,13 @@ CONFIG_ATA=y
 CONFIG_PATA_FTIDE010=y
 CONFIG_NETDEVICES=y
 CONFIG_TUN=y
+CONFIG_NET_DSA_REALTEK=y
 CONFIG_NET_DSA_REALTEK_SMI=y
+CONFIG_NET_DSA_REALTEK_RTL8366RB=y
 CONFIG_GEMINI_ETHERNET=y
+CONFIG_MARVELL_PHY=y
 CONFIG_MDIO_BITBANG=y
 CONFIG_MDIO_GPIO=y
-CONFIG_MARVELL_PHY=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_GPIO=y
 # CONFIG_INPUT_MOUSE is not set
@@ -66,6 +68,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_I2C_GPIO=y
 CONFIG_SPI=y
 CONFIG_SPI_GPIO=y
+CONFIG_SENSORS_DRIVETEMP=y
 CONFIG_SENSORS_GPIO_FAN=y
 CONFIG_SENSORS_LM75=y
 CONFIG_THERMAL=y
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
deleted file mode 100644 (file)
index 015b7ef..0000000
+++ /dev/null
@@ -1,365 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
-CONFIG_EXPERT=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_PXA=y
-CONFIG_MACH_INTELMOTE2=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=/dev/mtdblock2 rootfstype=jffs2 console=ttyS2,115200 mem=32M"
-CONFIG_KEXEC=y
-CONFIG_FPE_NWFPE=y
-CONFIG_BINFMT_AOUT=m
-CONFIG_BINFMT_MISC=m
-CONFIG_PM=y
-CONFIG_APM_EMULATION=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_DIAG is not set
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_MIP6=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_IPV6_SUBTREES=y
-CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
-CONFIG_NF_CONNTRACK=m
-CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
-CONFIG_NF_CONNTRACK_AMANDA=m
-CONFIG_NF_CONNTRACK_FTP=m
-CONFIG_NF_CONNTRACK_H323=m
-CONFIG_NF_CONNTRACK_IRC=m
-CONFIG_NF_CONNTRACK_NETBIOS_NS=m
-CONFIG_NF_CONNTRACK_PPTP=m
-CONFIG_NF_CONNTRACK_SANE=m
-CONFIG_NF_CONNTRACK_SIP=m
-CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NF_CT_NETLINK=m
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
-CONFIG_NETFILTER_XT_TARGET_LED=m
-CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFLOG=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
-CONFIG_NETFILTER_XT_MATCH_DSCP=m
-CONFIG_NETFILTER_XT_MATCH_ESP=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_HELPER=m
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MARK=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
-CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
-CONFIG_NETFILTER_XT_MATCH_QUOTA=m
-CONFIG_NETFILTER_XT_MATCH_REALM=m
-CONFIG_NETFILTER_XT_MATCH_SCTP=m
-CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
-CONFIG_NETFILTER_XT_MATCH_STRING=m
-CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_TIME=m
-CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
-CONFIG_IP_NF_MATCH_AH=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_AH=m
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_MATCH_FRAG=m
-CONFIG_IP6_NF_MATCH_OPTS=m
-CONFIG_IP6_NF_MATCH_HL=m
-CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_MH=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_FILTER=m
-CONFIG_IP6_NF_TARGET_REJECT=m
-CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_RAW=m
-CONFIG_BRIDGE=m
-# CONFIG_BRIDGE_IGMP_SNOOPING is not set
-CONFIG_IEEE802154=y
-# CONFIG_WIRELESS is not set
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_FW_LOADER=m
-CONFIG_CONNECTOR=m
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_AFS_PARTS=y
-CONFIG_MTD_AR7_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_ADV_OPTIONS=y
-CONFIG_MTD_CFI_GEOMETRY=y
-# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
-# CONFIG_MTD_CFI_I2 is not set
-CONFIG_MTD_OTP=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_PXA2XX=y
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=y
-# CONFIG_WLAN is not set
-CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-# CONFIG_INPUT_MOUSEDEV is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_GPIO=y
-CONFIG_KEYBOARD_PXA27x=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_UINPUT=y
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_PXA=y
-CONFIG_SERIAL_PXA_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=8
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_PXA=y
-CONFIG_SPI=y
-CONFIG_SPI_PXA2XX=y
-CONFIG_GPIO_SYSFS=y
-CONFIG_POWER_SUPPLY=y
-# CONFIG_HWMON is not set
-CONFIG_PMIC_DA903X=y
-CONFIG_REGULATOR=y
-CONFIG_REGULATOR_DEBUG=y
-CONFIG_REGULATOR_DA903X=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_VIDEO_DEV=y
-CONFIG_MEDIA_TUNER_CUSTOMISE=y
-# CONFIG_MEDIA_TUNER_SIMPLE is not set
-# CONFIG_MEDIA_TUNER_TDA8290 is not set
-# CONFIG_MEDIA_TUNER_TDA827X is not set
-# CONFIG_MEDIA_TUNER_TDA18271 is not set
-# CONFIG_MEDIA_TUNER_TDA9887 is not set
-# CONFIG_MEDIA_TUNER_TEA5761 is not set
-# CONFIG_MEDIA_TUNER_TEA5767 is not set
-# CONFIG_MEDIA_TUNER_MT20XX is not set
-# CONFIG_MEDIA_TUNER_MT2060 is not set
-# CONFIG_MEDIA_TUNER_MT2266 is not set
-# CONFIG_MEDIA_TUNER_MT2131 is not set
-# CONFIG_MEDIA_TUNER_QT1010 is not set
-# CONFIG_MEDIA_TUNER_XC2028 is not set
-# CONFIG_MEDIA_TUNER_XC5000 is not set
-# CONFIG_MEDIA_TUNER_MXL5005S is not set
-# CONFIG_MEDIA_TUNER_MXL5007T is not set
-# CONFIG_MEDIA_TUNER_MC44S803 is not set
-# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
-CONFIG_VIDEO_PXA27x=y
-# CONFIG_V4L_USB_DRIVERS is not set
-# CONFIG_RADIO_ADAPTERS is not set
-CONFIG_FB=y
-CONFIG_FB_PXA=y
-CONFIG_FB_PXA_OVERLAY=y
-CONFIG_FB_PXA_PARAMETERS=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_MINI_4x6=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-# CONFIG_SND_DRIVERS is not set
-# CONFIG_SND_ARM is not set
-# CONFIG_SND_SPI is not set
-# CONFIG_SND_USB is not set
-CONFIG_SND_SOC=y
-CONFIG_SND_PXA2XX_SOC=y
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_PXA27X=y
-CONFIG_USB_ETH=m
-# CONFIG_USB_ETH_RNDIS is not set
-CONFIG_MMC=y
-CONFIG_SDIO_UART=m
-CONFIG_MMC_PXA=y
-CONFIG_MMC_SPI=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_LP3944=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_BACKLIGHT=y
-CONFIG_LEDS_TRIGGER_GPIO=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_PXA=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=m
-CONFIG_AUTOFS4_FS=y
-CONFIG_FUSE_FS=m
-CONFIG_CUSE=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_WBUF_VERIFY=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_JFFS2_FS_XATTR=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_JFFS2_LZO=y
-CONFIG_JFFS2_RUBIN=y
-CONFIG_CRAMFS=m
-CONFIG_SQUASHFS=m
-CONFIG_ROMFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3_ACL=y
-CONFIG_SMB_FS=m
-CONFIG_CIFS=m
-CONFIG_CIFS_STATS=y
-CONFIG_CIFS_XATTR=y
-CONFIG_CIFS_POSIX=y
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_SCHED_DEBUG is not set
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_PROVE_LOCKING=y
-# CONFIG_FTRACE is not set
-CONFIG_DEBUG_USER=y
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_GHASH=m
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_AES=m
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SEED=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC16=y
index 3b30913d7d8d33ccfc3abe49ce7d20ce29eb76f8..a352207a64d74d054fffe201c7a17c8c520f4029 100644 (file)
@@ -20,7 +20,6 @@ CONFIG_VFP=y
 CONFIG_NEON=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_CMA=y
 CONFIG_NET=y
@@ -41,6 +40,8 @@ CONFIG_MAC80211_LEDS=y
 CONFIG_CAIF=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_GNSS=y
+CONFIG_GNSS_SIRF_SERIAL=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=65536
 CONFIG_NETDEVICES=y
@@ -83,6 +84,8 @@ CONFIG_SPI_GPIO=y
 CONFIG_SPI_PL022=y
 CONFIG_GPIO_STMPE=y
 CONFIG_GPIO_TC3589X=y
+CONFIG_BATTERY_SAMSUNG_SDI=y
+CONFIG_AB8500_BM=y
 CONFIG_SENSORS_IIO_HWMON=y
 CONFIG_SENSORS_NTC_THERMISTOR=y
 CONFIG_THERMAL=y
@@ -98,10 +101,13 @@ CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_V4L2_FLASH_LED_CLASS=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_NOVATEK_NT35510=y
+CONFIG_DRM_PANEL_NOVATEK_NT35560=y
+CONFIG_DRM_PANEL_SAMSUNG_DB7430=y
 CONFIG_DRM_PANEL_SAMSUNG_S6D16D0=y
+CONFIG_DRM_PANEL_SAMSUNG_S6D27A1=y
 CONFIG_DRM_PANEL_SAMSUNG_S6E63M0=y
 CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI=y
-CONFIG_DRM_PANEL_SONY_ACX424AKP=y
+CONFIG_DRM_PANEL_WIDECHIPS_WS2401=y
 CONFIG_DRM_LIMA=y
 CONFIG_DRM_MCDE=y
 CONFIG_FB=y
@@ -129,6 +135,7 @@ CONFIG_LEDS_LM3530=y
 CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_LP55XX_COMMON=y
 CONFIG_LEDS_LP5521=y
+CONFIG_LEDS_REGULATOR=y
 CONFIG_LEDS_RT8515=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_RTC_CLASS=y
@@ -144,17 +151,22 @@ CONFIG_IIO_SW_TRIGGER=y
 CONFIG_BMA180=y
 CONFIG_BMC150_ACCEL=y
 CONFIG_IIO_ST_ACCEL_3AXIS=y
+# CONFIG_IIO_ST_ACCEL_SPI_3AXIS is not set
 CONFIG_IIO_RESCALE=y
 CONFIG_MPU3050_I2C=y
 CONFIG_IIO_ST_GYRO_3AXIS=y
+# CONFIG_IIO_ST_GYRO_SPI_3AXIS is not set
 CONFIG_INV_MPU6050_I2C=y
 CONFIG_BH1780=y
 CONFIG_GP2AP002=y
+CONFIG_TSL2772=y
 CONFIG_AK8974=y
 CONFIG_IIO_ST_MAGN_3AXIS=y
+# CONFIG_IIO_ST_MAGN_SPI_3AXIS is not set
 CONFIG_YAMAHA_YAS530=y
 CONFIG_IIO_HRTIMER_TRIGGER=y
 CONFIG_IIO_ST_PRESS=y
+# CONFIG_IIO_ST_PRESS_SPI is not set
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
@@ -173,10 +185,9 @@ CONFIG_CRYPTO_DEV_UX500_CRYP=y
 CONFIG_CRYPTO_DEV_UX500_HASH=y
 CONFIG_CRYPTO_DEV_UX500_DEBUG=y
 CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_KERNEL=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_FTRACE is not set
 CONFIG_DEBUG_USER=y
index c799a3c49342882df3ee00a5f3c07f598f55d4a3..167d44b550f469df662cf9d9cf8a787dc52b7066 100644 (file)
@@ -77,10 +77,6 @@ struct user{
   struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */
                                /* the FP registers. */
 };
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
 
 /*
  * User specific VFP registers. If only VFPv2 is present, registers 16 to 31
index 039feb7cd590b8714f97704f36fb7231ac2be9b2..1e8a50a97edf2e49dc2075b5b119a5d561010b86 100644 (file)
@@ -1004,7 +1004,8 @@ static void __init reserve_crashkernel(void)
        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
-       if (ret)
+       /* invalid value specified or crashkernel=0 */
+       if (ret || !crash_size)
                return;
 
        if (crash_base <= 0) {
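The arm reserve_crashkernel() hunk above adds !crash_size to the bail-out: crashkernel=0 parses successfully (ret == 0) with a zero size, so the old check let a zero-sized reservation attempt fall through. The guard, with both cases spelled out:

	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	/* Bail on a malformed option (ret != 0) and also on an explicit
	 * crashkernel=0, which parses cleanly but requests no memory. */
	if (ret || !crash_size)
		return;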
index b5efecb3d73079e04718033dd914fe7ed1805690..d0fa2037460ac7906ab5bef26e5e1ad518e15d1c 100644 (file)
@@ -54,17 +54,17 @@ int notrace unwind_frame(struct stackframe *frame)
                return -EINVAL;
 
        frame->sp = frame->fp;
-       frame->fp = *(unsigned long *)(fp);
-       frame->pc = *(unsigned long *)(fp + 4);
+       frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
+       frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 4));
 #else
        /* check current frame pointer is within bounds */
        if (fp < low + 12 || fp > high - 4)
                return -EINVAL;
 
        /* restore the registers from the stack frame */
-       frame->fp = *(unsigned long *)(fp - 12);
-       frame->sp = *(unsigned long *)(fp - 8);
-       frame->pc = *(unsigned long *)(fp - 4);
+       frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12));
+       frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8));
+       frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4));
 #endif
 #ifdef CONFIG_KRETPROBES
        if (is_kretprobe_trampoline(frame->pc))
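The unwinder hunks replace plain loads from the traced stack with READ_ONCE_NOCHECK(). The frame being walked can belong to a task that is still running, so the loads must be single-copy reads, and the _NOCHECK variant additionally keeps KASAN from flagging accesses to another task's stack. The pattern, restated with that rationale (offsets are the ARM frame layout quoted above):

	/* Single-copy, instrumentation-exempt reads: the frame may be
	 * concurrently modified and lies outside this task's stack. */
	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12));
	frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8));
	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4));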
index 428012687a802a9f2cbeeb9e24f950e5bd5f960f..7f7f6bae21c2d7dd0ffb8f17e58b9fb5ea25cb47 100644 (file)
@@ -1101,11 +1101,13 @@ static int __init da850_evm_config_emac(void)
        int ret;
        u32 val;
        struct davinci_soc_info *soc_info = &davinci_soc_info;
-       u8 rmii_en = soc_info->emac_pdata->rmii_en;
+       u8 rmii_en;
 
        if (!machine_is_davinci_da850_evm())
                return 0;
 
+       rmii_en = soc_info->emac_pdata->rmii_en;
+
        cfg_chip3_base = DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG);
 
        val = __raw_readl(cfg_chip3_base);
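The da850-evm hunk is an ordering fix: the old code dereferenced soc_info->emac_pdata in the declaration, before the machine_is_davinci_da850_evm() guard, so the initcall touched that pointer on every DaVinci board, valid or not. The corrected shape:

	u8 rmii_en;

	if (!machine_is_davinci_da850_evm())
		return 0;

	/* Safe now: this board is known to provide EMAC pdata. */
	rmii_en = soc_info->emac_pdata->rmii_en;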
index cc75087134d38c75ee8c08e30d637221303e6c14..4fa6ea5461b79b33d4ef79f9d2d42daddfb2a499 100644 (file)
@@ -148,8 +148,10 @@ static struct clk_hw *ep93xx_clk_register_gate(const char *name,
        psc->lock = &clk_lock;
 
        clk = clk_register(NULL, &psc->hw);
-       if (IS_ERR(clk))
+       if (IS_ERR(clk)) {
                kfree(psc);
+               return ERR_CAST(clk);
+       }
 
        return &psc->hw;
 }
@@ -207,7 +209,7 @@ static int ep93xx_mux_determine_rate(struct clk_hw *hw,
                                struct clk_rate_request *req)
 {
        unsigned long rate = req->rate;
-       struct clk *best_parent = 0;
+       struct clk *best_parent = NULL;
        unsigned long __parent_rate;
        unsigned long best_rate = 0, actual_rate, mclk_rate;
        unsigned long best_parent_rate;
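Two distinct fixes sit in the ep93xx clock hunks: the gate helper used to kfree(psc) on clk_register() failure and then fall through to return &psc->hw, handing the caller a pointer into freed memory, and a struct clk pointer was initialized with the integer 0 rather than NULL. A sketch of the corrected error path (my_clk_psc stands in for the driver's private wrapper type):

	static struct clk_hw *register_gate(struct my_clk_psc *psc)
	{
		struct clk *clk = clk_register(NULL, &psc->hw);

		if (IS_ERR(clk)) {
			kfree(psc);
			/* Propagate the ERR_PTR; &psc->hw is gone. */
			return ERR_CAST(clk);
		}
		return &psc->hw;
	}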
index 2882674a1c39914090eeb829e92e3a960097a132..7135a0ac994956be091a373922c929bac4ca4305 100644 (file)
@@ -7,6 +7,8 @@
 #include <asm/traps.h>
 #include <asm/ptrace.h>
 
+#include "iop3xx.h"
+
 void iop_enable_cp6(void)
 {
        u32 temp;
index 0659ab4cb0af315994efd0de966c0db645e8ca12..11677fc2968f27093c6f535534e9db288f04393a 100644 (file)
@@ -59,8 +59,13 @@ static void __init omap_optee_init_check(void)
 u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
                                                         u32 arg3, u32 arg4)
 {
+       static u32 buf[NR_CPUS][5];
+       u32 *param;
+       int cpu;
        u32 ret;
-       u32 param[5];
+
+       cpu = get_cpu();
+       param = buf[cpu];
 
        param[0] = nargs;
        param[1] = arg1;
@@ -76,6 +81,8 @@ u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
        outer_clean_range(__pa(param), __pa(param + 5));
        ret = omap_smc2(idx, flag, __pa(param));
 
+       put_cpu();
+
        return ret;
 }
 
@@ -119,8 +126,8 @@ phys_addr_t omap_secure_ram_mempool_base(void)
 #if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
 u32 omap3_save_secure_ram(void __iomem *addr, int size)
 {
+       static u32 param[5];
        u32 ret;
-       u32 param[5];
 
        if (size != OMAP3_SAVE_SECURE_RAM_SZ)
                return OMAP3_SAVE_SECURE_RAM_SZ;
@@ -153,8 +160,8 @@ u32 omap3_save_secure_ram(void __iomem *addr, int size)
 u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
                           u32 arg1, u32 arg2, u32 arg3, u32 arg4)
 {
+       static u32 param[5];
        u32 ret;
-       u32 param[5];
 
        param[0] = nargs+1; /* RX-51 needs number of arguments + 1 */
        param[1] = arg1;
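The omap-secure hunks move the five-word param[] blocks off the stack. As I read the change, __pa() and outer_clean_range() are only well-defined on linearly mapped memory, so a stack buffer breaks once stacks can be vmalloc'd; the dispatcher therefore uses a static per-CPU buffer and pins the CPU around the call. The pattern (firmware_smc() is a hypothetical stand-in for the actual SMC helper):

	static u32 buf[NR_CPUS][5];	/* lowmem, so __pa() is valid */
	u32 *param;
	u32 ret;

	param = buf[get_cpu()];		/* no migration until put_cpu() */
	/* ... fill param[0..4], clean the cache lines to RAM ... */
	ret = firmware_smc(__pa(param));
	put_cpu();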
index 285e1f0f4145a072a7f8712c52b4d9383030cd7a..0d7d408c372915c85c28fa7a753b4ea3a96e4d1f 100644 (file)
@@ -236,11 +236,11 @@ static int __init jive_mtdset(char *options)
        unsigned long set;
 
        if (options == NULL || options[0] == '\0')
-               return 0;
+               return 1;
 
        if (kstrtoul(options, 10, &set)) {
                printk(KERN_ERR "failed to parse mtdset=%s\n", options);
-               return 0;
+               return 1;
        }
 
        switch (set) {
@@ -256,7 +256,7 @@ static int __init jive_mtdset(char *options)
                       "using default.", set);
        }
 
-       return 0;
+       return 1;
 }
 
 /* parse the mtdset= option given to the kernel command line */
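The jive_mtdset() hunks change every return from 0 to 1 because of the __setup() contract: a handler returns 1 to say it consumed the option, while 0 means "not mine", letting the bogus option leak through to init's argument list. That is why even the error paths now return 1:

	static int __init jive_mtdset(char *options)
	{
		if (options == NULL || options[0] == '\0')
			return 1;	/* nothing to parse, but handled */
		/* ... parse, warn on bad values ... */
		return 1;		/* always consume mtdset= */
	}
	__setup("mtdset=", jive_mtdset);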
index 1da11bdb1dfbd6f1ce906b83ba2d84ecd396f316..6e6985e756afb7eeda5f0b0b0f5cce94fa3c197b 100644 (file)
@@ -122,13 +122,13 @@ static inline bool cluster_is_a15(u32 cluster)
 }
 
 /**
- * ve_spc_global_wakeup_irq()
+ * ve_spc_global_wakeup_irq() - sets/clears global wakeup IRQs
+ *
+ * @set: if true, global wake-up IRQs are set, if false they are cleared
  *
  * Function to set/clear global wakeup IRQs. Not protected by locking since
  * it might be used in code paths where normal cacheable locks are not
  * working. Locking must be provided by the caller to ensure atomicity.
- *
- * @set: if true, global wake-up IRQs are set, if false they are cleared
  */
 void ve_spc_global_wakeup_irq(bool set)
 {
@@ -145,15 +145,15 @@ void ve_spc_global_wakeup_irq(bool set)
 }
 
 /**
- * ve_spc_cpu_wakeup_irq()
- *
- * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since
- * it might be used in code paths where normal cacheable locks are not
- * working. Locking must be provided by the caller to ensure atomicity.
+ * ve_spc_cpu_wakeup_irq() - sets/clears per-CPU wake-up IRQs
  *
  * @cluster: mpidr[15:8] bitfield describing cluster affinity level
  * @cpu: mpidr[7:0] bitfield describing cpu affinity level
  * @set: if true, wake-up IRQs are set, if false they are cleared
+ *
+ * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since
+ * it might be used in code paths where normal cacheable locks are not
+ * working. Locking must be provided by the caller to ensure atomicity.
  */
 void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set)
 {
@@ -200,14 +200,14 @@ void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr)
 }
 
 /**
- * ve_spc_powerdown()
+ * ve_spc_powerdown() - enables/disables cluster powerdown
+ *
+ * @cluster: mpidr[15:8] bitfield describing cluster affinity level
+ * @enable: if true enables powerdown, if false disables it
  *
  * Function to enable/disable cluster powerdown. Not protected by locking
  * since it might be used in code paths where normal cacheable locks are not
  * working. Locking must be provided by the caller to ensure atomicity.
- *
- * @cluster: mpidr[15:8] bitfield describing cluster affinity level
- * @enable: if true enables powerdown, if false disables it
  */
 void ve_spc_powerdown(u32 cluster, bool enable)
 {
@@ -228,7 +228,7 @@ static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster)
 }
 
 /**
- * ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
+ * ve_spc_cpu_in_wfi() - Checks if the specified CPU is in WFI or not
  *
  * @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster
  * @cluster: mpidr[15:8] bitfield describing cluster affinity level
@@ -580,7 +580,7 @@ static int __init ve_spc_clk_init(void)
                }
 
                cluster = topology_physical_package_id(cpu_dev->id);
-               if (init_opp_table[cluster])
+               if (cluster < 0 || init_opp_table[cluster])
                        continue;
 
                if (ve_init_opp_table(cpu_dev))
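The final ve_spc hunk hardens the OPP init loop: topology_physical_package_id() returns -1 when the package is unknown, and the old code used that value directly as an array index. The added check:

	cluster = topology_physical_package_id(cpu_dev->id);
	/* -1 = unknown package; indexing with it would read before
	 * the start of init_opp_table[], so skip such CPUs. */
	if (cluster < 0 || init_opp_table[cluster])
		continue;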
index 4b61541853ea1c7ea6d996bf0332d7eb4f69ba2f..82ffac621854f863f8cb4b9b4f17621ffc83eb7a 100644 (file)
@@ -381,6 +381,7 @@ out:
  */
 postcore_initcall(atomic_pool_init);
 
+#ifdef CONFIG_CMA_AREAS
 struct dma_contig_early_reserve {
        phys_addr_t base;
        unsigned long size;
@@ -435,6 +436,7 @@ void __init dma_contiguous_remap(void)
                iotable_init(&map, 1);
        }
 }
+#endif
 
 static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
 {
index 9ff683612f2a8cfc3e59f198e2b806415878bf63..d7ffccb7fea7d6f759104b62f9ab40a8d53cdeeb 100644 (file)
@@ -88,6 +88,10 @@ extern phys_addr_t arm_lowmem_limit;
 
 void __init bootmem_init(void);
 void arm_mm_memblock_reserve(void);
+#ifdef CONFIG_CMA_AREAS
 void dma_contiguous_remap(void);
+#else
+static inline void dma_contiguous_remap(void) { }
+#endif
 
 unsigned long __clear_cr(unsigned long mask);
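The mm header hunk pairs with the CONFIG_CMA_AREAS guards added around dma_contiguous_remap() above: a real prototype when the option is set, an empty static inline otherwise, so call sites stay unconditional. The idiom, with a hypothetical call site:

	#ifdef CONFIG_CMA_AREAS
	void dma_contiguous_remap(void);
	#else
	static inline void dma_contiguous_remap(void) { }
	#endif

	/* Callers need no #ifdef; the stub compiles away: */
	void __init paging_init(const struct machine_desc *mdesc)
	{
		/* ... */
		dma_contiguous_remap();
	}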
index 4a5c50f67cedddaa9da2c3115e3c7248ce677a21..81f13bdf32f2b98ad64e5aeb8bcb489c2a43d7fa 100644 (file)
@@ -29,8 +29,7 @@ kapi: $(kapi-hdrs-y) $(gen-y)
 uapi:  $(uapi-hdrs-y)
 
 # Create output directory if not already present
-_dummy := $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') \
-          $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')
+$(shell mkdir -p $(kapi) $(uapi))
 
 quiet_cmd_gen_mach = GEN     $@
       cmd_gen_mach = $(AWK) -f $(real-prereqs) > $@
index 6a6093064a329bc4f0530ac2d36983dfda01bb39..68103a8b0ef531b5da35133290d2de212cd3f826 100644 (file)
@@ -1,4 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_ARCH_SEATTLE) += amd-overdrive.dtb \
-                       amd-overdrive-rev-b0.dtb amd-overdrive-rev-b1.dtb \
-                       husky.dtb
+dtb-$(CONFIG_ARCH_SEATTLE) += amd-overdrive-rev-b0.dtb amd-overdrive-rev-b1.dtb
index 8e341be9a3991aa0bed664f50c8e4b581f6e46d9..c290d1ce2b037dc870ea4f897cfeb81465753720 100644 (file)
@@ -9,6 +9,7 @@
 /dts-v1/;
 
 /include/ "amd-seattle-soc.dtsi"
+/include/ "amd-seattle-cpus.dtsi"
 
 / {
        model = "AMD Seattle (Rev.B0) Development Board (Overdrive)";
        status = "ok";
 };
 
-&gpio2 {
-       status = "ok";
-};
-
-&gpio3 {
-       status = "ok";
-};
-
 &gpio4 {
        status = "ok";
 };
        };
 };
 
-&ipmi_kcs {
-       status = "ok";
-};
-
 &smb0 {
        /include/ "amd-seattle-xgbe-b.dtsi"
 };
index 92cef05c6b74f50a5b1a11d491ad932e8b2084c1..e0926f6bb7c334ac3f26e87245322f0c541da9f2 100644 (file)
@@ -9,6 +9,7 @@
 /dts-v1/;
 
 /include/ "amd-seattle-soc.dtsi"
+/include/ "amd-seattle-cpus.dtsi"
 
 / {
        model = "AMD Seattle (Rev.B1) Development Board (Overdrive)";
diff --git a/arch/arm64/boot/dts/amd/amd-overdrive.dts b/arch/arm64/boot/dts/amd/amd-overdrive.dts
deleted file mode 100644 (file)
index 41b3a6c..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DTS file for AMD Seattle Overdrive Development Board
- *
- * Copyright (C) 2014 Advanced Micro Devices, Inc.
- */
-
-/dts-v1/;
-
-/include/ "amd-seattle-soc.dtsi"
-
-/ {
-       model = "AMD Seattle Development Board (Overdrive)";
-       compatible = "amd,seattle-overdrive", "amd,seattle";
-
-       chosen {
-               stdout-path = &serial0;
-       };
-};
-
-&ccp0 {
-       status = "ok";
-};
-
-&gpio0 {
-       status = "ok";
-};
-
-&gpio1 {
-       status = "ok";
-};
-
-&i2c0 {
-       status = "ok";
-};
-
-&pcie0 {
-       status = "ok";
-};
-
-&spi0 {
-       status = "ok";
-};
-
-&spi1 {
-       status = "ok";
-       sdcard0: sdcard@0 {
-               compatible = "mmc-spi-slot";
-               reg = <0>;
-               spi-max-frequency = <20000000>;
-               voltage-ranges = <3200 3400>;
-               gpios = <&gpio0 7 0>;
-               interrupt-parent = <&gpio0>;
-               interrupts = <7 3>;
-               pl022,hierarchy = <0>;
-               pl022,interface = <0>;
-               pl022,com-mode = <0x0>;
-               pl022,rx-level-trig = <0>;
-               pl022,tx-level-trig = <0>;
-       };
-};
-
-&v2m0 {
-       arm,msi-base-spi = <64>;
-       arm,msi-num-spis = <256>;
-};
diff --git a/arch/arm64/boot/dts/amd/amd-seattle-cpus.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-cpus.dtsi
new file mode 100644 (file)
index 0000000..93688a0
--- /dev/null
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/ {
+       cpus {
+               #address-cells = <0x1>;
+               #size-cells = <0x0>;
+
+               cpu-map {
+                       cluster0 {
+                               core0 {
+                                       cpu = <&CPU0>;
+                               };
+                               core1 {
+                                       cpu = <&CPU1>;
+                               };
+                       };
+                       cluster1 {
+                               core0 {
+                                       cpu = <&CPU2>;
+                               };
+                               core1 {
+                                       cpu = <&CPU3>;
+                               };
+                       };
+                       cluster2 {
+                               core0 {
+                                       cpu = <&CPU4>;
+                               };
+                               core1 {
+                                       cpu = <&CPU5>;
+                               };
+                       };
+                       cluster3 {
+                               core0 {
+                                       cpu = <&CPU6>;
+                               };
+                               core1 {
+                                       cpu = <&CPU7>;
+                               };
+                       };
+               };
+
+               CPU0: cpu@0 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x0>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_0>;
+
+               };
+
+               CPU1: cpu@1 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x1>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_0>;
+               };
+
+               CPU2: cpu@100 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x100>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_1>;
+               };
+
+               CPU3: cpu@101 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x101>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_1>;
+               };
+
+               CPU4: cpu@200 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x200>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_2>;
+               };
+
+               CPU5: cpu@201 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x201>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_2>;
+               };
+
+               CPU6: cpu@300 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x300>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_3>;
+               };
+
+               CPU7: cpu@301 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a57";
+                       reg = <0x301>;
+                       enable-method = "psci";
+
+                       i-cache-size = <0xC000>;
+                       i-cache-line-size = <64>;
+                       i-cache-sets = <256>;
+                       d-cache-size = <0x8000>;
+                       d-cache-line-size = <64>;
+                       d-cache-sets = <256>;
+                       l2-cache = <&L2_3>;
+               };
+       };
+
+       L2_0: l2-cache0 {
+               cache-size = <0x100000>;
+               cache-line-size = <64>;
+               cache-sets = <1024>;
+               cache-unified;
+               next-level-cache = <&L3>;
+       };
+
+       L2_1: l2-cache1 {
+               cache-size = <0x100000>;
+               cache-line-size = <64>;
+               cache-sets = <1024>;
+               cache-unified;
+               next-level-cache = <&L3>;
+       };
+
+       L2_2: l2-cache2 {
+               cache-size = <0x100000>;
+               cache-line-size = <64>;
+               cache-sets = <1024>;
+               cache-unified;
+               next-level-cache = <&L3>;
+       };
+
+       L2_3: l2-cache3 {
+               cache-size = <0x100000>;
+               cache-line-size = <64>;
+               cache-sets = <1024>;
+               cache-unified;
+               next-level-cache = <&L3>;
+       };
+
+       L3: l3-cache {
+               cache-level = <3>;
+               cache-size = <0x800000>;
+               cache-line-size = <64>;
+               cache-sets = <8192>;
+               cache-unified;
+       };
+
+       pmu {
+               compatible = "arm,cortex-a57-pmu";
+               interrupts = <0x0 0x7 0x4>,
+                            <0x0 0x8 0x4>,
+                            <0x0 0x9 0x4>,
+                            <0x0 0xa 0x4>,
+                            <0x0 0xb 0x4>,
+                            <0x0 0xc 0x4>,
+                            <0x0 0xd 0x4>,
+                            <0x0 0xe 0x4>;
+               interrupt-affinity = <&CPU0>,
+                                    <&CPU1>,
+                                    <&CPU2>,
+                                    <&CPU3>,
+                                    <&CPU4>,
+                                    <&CPU5>,
+                                    <&CPU6>,
+                                    <&CPU7>;
+       };
+};
index b664e7af74eb3a99953796816ed2e5525b169832..690020589d41fe5cc3c5fec5e23f9547a63064e6 100644 (file)
                             <1 10 0xff04>;
        };
 
-       pmu {
-               compatible = "arm,armv8-pmuv3";
-               interrupts = <0 7 4>,
-                            <0 8 4>,
-                            <0 9 4>,
-                            <0 10 4>,
-                            <0 11 4>,
-                            <0 12 4>,
-                            <0 13 4>,
-                            <0 14 4>;
-       };
-
        smb0: smb {
                compatible = "simple-bus";
                #address-cells = <2>;
@@ -70,6 +58,7 @@
                        reg = <0 0xe0300000 0 0xf0000>;
                        interrupts = <0 355 4>;
                        clocks = <&sataclk_333mhz>;
+                       iommus = <&sata0_smmu 0x0 0x1f>;
                        dma-coherent;
                };
 
                        reg = <0 0xe0d00000 0 0xf0000>;
                        interrupts = <0 354 4>;
                        clocks = <&sataclk_333mhz>;
+                       iommus = <&sata1_smmu 0x0e>,
+                                <&sata1_smmu 0x0f>,
+                                <&sata1_smmu 0x1e>;
+                       dma-coherent;
+               };
+
+               sata0_smmu: iommu@e0200000 {
+                       compatible = "arm,mmu-401";
+                       reg = <0 0xe0200000 0 0x10000>;
+                       #global-interrupts = <1>;
+                       interrupts = <0 332 4>, <0 332 4>;
+                       #iommu-cells = <2>;
+                       dma-coherent;
+               };
+
+               sata1_smmu: iommu@e0c00000 {
+                       compatible = "arm,mmu-401";
+                       reg = <0 0xe0c00000 0 0x10000>;
+                       #global-interrupts = <1>;
+                       interrupts = <0 331 4>, <0 331 4>;
+                       #iommu-cells = <1>;
                        dma-coherent;
                };
 
                        reg = <0 0xe0100000 0 0x10000>;
                        interrupts = <0 3 4>;
                        dma-coherent;
+                       iommus = <&sata1_smmu 0x00>,
+                                <&sata1_smmu 0x02>,
+                                <&sata1_smmu 0x40>,
+                                <&sata1_smmu 0x42>;
                };
 
                pcie0: pcie@f0000000 {
                        msi-parent = <&v2m0>;
                        reg = <0 0xf0000000 0 0x10000000>;
 
-                       interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+                       interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
                        interrupt-map =
-                               <0x1000 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x120 0x1>,
-                               <0x1000 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x121 0x1>,
-                               <0x1000 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x122 0x1>,
-                               <0x1000 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x123 0x1>;
+                               <0x1100 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x120 0x1>,
+                               <0x1100 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x121 0x1>,
+                               <0x1100 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x122 0x1>,
+                               <0x1100 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x123 0x1>,
+
+                               <0x1200 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x124 0x1>,
+                               <0x1200 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x125 0x1>,
+                               <0x1200 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x126 0x1>,
+                               <0x1200 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x127 0x1>,
+
+                               <0x1300 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x128 0x1>,
+                               <0x1300 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x129 0x1>,
+                               <0x1300 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x12a 0x1>,
+                               <0x1300 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x12b 0x1>;
 
                        dma-coherent;
                        dma-ranges = <0x43000000 0x0 0x0 0x0 0x0 0x100 0x0>;
                                <0x01000000 0x00 0x00000000 0x00 0xefff0000 0x00 0x00010000>,
                                /* 32-bit MMIO (size=2G) */
                                <0x02000000 0x00 0x40000000 0x00 0x40000000 0x00 0x80000000>,
-                               /* 64-bit MMIO (size= 124G) */
+                               /* 64-bit MMIO (size= 508G) */
                                <0x03000000 0x01 0x00000000 0x01 0x00000000 0x7f 0x00000000>;
+                       iommu-map = <0x0 &pcie_smmu 0x0 0x10000>;
+               };
+
+               pcie_smmu: iommu@e0a00000 {
+                       compatible = "arm,mmu-401";
+                       reg = <0 0xe0a00000 0 0x10000>;
+                       #global-interrupts = <1>;
+                       interrupts = <0 333 4>, <0 333 4>;
+                       #iommu-cells = <1>;
+                       dma-coherent;
                };
 
                /* Perf CCN504 PMU */
index d97498361ce3b7f6565030d766c4f8a45a624d18..9259e547e2e814d957b46804ea1d56078401d139 100644 (file)
@@ -55,7 +55,7 @@
                clocks = <&xgmacclk0_dma_250mhz>, <&xgmacclk0_ptp_250mhz>;
                clock-names = "dma_clk", "ptp_clk";
                phy-mode = "xgmii";
-               #stream-id-cells = <16>;
+               iommus = <&xgmac0_smmu 0x00 0x17>; /* 0-7, 16-23 */
                dma-coherent;
        };
 
                clocks = <&xgmacclk1_dma_250mhz>, <&xgmacclk1_ptp_250mhz>;
                clock-names = "dma_clk", "ptp_clk";
                phy-mode = "xgmii";
-               #stream-id-cells = <16>;
+               iommus = <&xgmac1_smmu 0x00 0x17>; /* 0-7, 16-23 */
                dma-coherent;
        };
 
-       xgmac0_smmu: smmu@e0600000 {
+       xgmac0_smmu: iommu@e0600000 {
                 compatible = "arm,mmu-401";
                 reg = <0 0xe0600000 0 0x10000>;
                 #global-interrupts = <1>;
                               */
                              <0 336 4>,
                              <0 336 4>;
-
-                mmu-masters = <&xgmac0
-                         0  1  2  3  4  5  6  7
-                        16 17 18 19 20 21 22 23
-                >;
+               #iommu-cells = <2>;
+               dma-coherent;
         };
 
-        xgmac1_smmu: smmu@e0800000 {
+        xgmac1_smmu: iommu@e0800000 {
                 compatible = "arm,mmu-401";
                 reg = <0 0xe0800000 0 0x10000>;
                 #global-interrupts = <1>;
                               */
                              <0 335 4>,
                              <0 335 4>;
-
-                mmu-masters = <&xgmac1
-                         0  1  2  3  4  5  6  7
-                        16 17 18 19 20 21 22 23
-                >;
+               #iommu-cells = <2>;
+               dma-coherent;
         };
diff --git a/arch/arm64/boot/dts/amd/husky.dts b/arch/arm64/boot/dts/amd/husky.dts
deleted file mode 100644 (file)
index 7acde34..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DTS file for AMD/Linaro 96Boards Enterprise Edition Server (Husky) Board
- * Note: Based-on AMD Seattle Rev.B0
- *
- * Copyright (C) 2015 Advanced Micro Devices, Inc.
- */
-
-/dts-v1/;
-
-/include/ "amd-seattle-soc.dtsi"
-
-/ {
-       model = "Linaro 96Boards Enterprise Edition Server (Husky) Board";
-       compatible = "amd,seattle-overdrive", "amd,seattle";
-
-       chosen {
-               stdout-path = &serial0;
-       };
-
-       psci {
-               compatible   = "arm,psci-0.2";
-               method       = "smc";
-       };
-};
-
-&ccp0 {
-       status = "ok";
-       amd,zlib-support = <1>;
-};
-
-/**
- * NOTE: In Rev.B, gpio0 is reserved.
- */
-&gpio1 {
-       status = "ok";
-};
-
-&gpio2 {
-       status = "ok";
-};
-
-&gpio3 {
-       status = "ok";
-};
-
-&gpio4 {
-       status = "ok";
-};
-
-&i2c0 {
-       status = "ok";
-};
-
-&i2c1 {
-       status = "ok";
-};
-
-&pcie0 {
-       status = "ok";
-};
-
-&spi0 {
-       status = "ok";
-};
-
-&spi1 {
-       status = "ok";
-       sdcard0: sdcard@0 {
-               compatible = "mmc-spi-slot";
-               reg = <0>;
-               spi-max-frequency = <20000000>;
-               voltage-ranges = <3200 3400>;
-               pl022,hierarchy = <0>;
-               pl022,interface = <0>;
-               pl022,com-mode = <0x0>;
-               pl022,rx-level-trig = <0>;
-               pl022,tx-level-trig = <0>;
-       };
-};
-
-&smb0 {
-       /include/ "amd-seattle-xgbe-b.dtsi"
-};
index 01b01e3204118c526cbd9c04b58f08f424eeaf6e..35d1939e690b0ee6275f994f46689a7708ffcde7 100644 (file)
                        clock-names = "i2c";
                        clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
                                            QORIQ_CLK_PLL_DIV(1)>;
-                       dmas = <&edma0 1 39>,
-                              <&edma0 1 38>;
-                       dma-names = "tx", "rx";
+                       dmas = <&edma0 1 38>,
+                              <&edma0 1 39>;
+                       dma-names = "rx", "tx";
                        status = "disabled";
                };
 
index 687fea6d8afa4b096575472b083e912c97e67d30..4e7bd04d979841d7cbb6fb42a765c9ca0af11eb8 100644 (file)
                        interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
                                            QORIQ_CLK_PLL_DIV(2)>;
-                       dmas = <&edma0 1 39>,
-                              <&edma0 1 38>;
-                       dma-names = "tx", "rx";
+                       dmas = <&edma0 1 38>,
+                              <&edma0 1 39>;
+                       dma-names = "rx", "tx";
                        status = "disabled";
                };
 
index 1dc9d187601c54d4dd5dbe21521e79a14fe3e8ff..a0bd540f27d3d43e0e4819e99671ab348c72eced 100644 (file)
                pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
 
                ti,x-min = /bits/ 16 <125>;
-               touchscreen-size-x = /bits/ 16 <4008>;
+               touchscreen-size-x = <4008>;
                ti,y-min = /bits/ 16 <282>;
-               touchscreen-size-y = /bits/ 16 <3864>;
+               touchscreen-size-y = <3864>;
                ti,x-plate-ohms = /bits/ 16 <180>;
-               touchscreen-max-pressure = /bits/ 16 <255>;
-               touchscreen-average-samples = /bits/ 16 <10>;
+               touchscreen-max-pressure = <255>;
+               touchscreen-average-samples = <10>;
                ti,debounce-tol = /bits/ 16 <3>;
                ti,debounce-rep = /bits/ 16 <1>;
                ti,settle-delay-usec = /bits/ 16 <150>;
index b16c7caf34c1163fa97aeac515983c21140d88de..87b5e23c766f7361cdb6b613603e2d80c23c60b8 100644 (file)
                pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
 
                ti,x-min = /bits/ 16 <125>;
-               touchscreen-size-x = /bits/ 16 <4008>;
+               touchscreen-size-x = <4008>;
                ti,y-min = /bits/ 16 <282>;
-               touchscreen-size-y = /bits/ 16 <3864>;
+               touchscreen-size-y = <3864>;
                ti,x-plate-ohms = /bits/ 16 <180>;
-               touchscreen-max-pressure = /bits/ 16 <255>;
-               touchscreen-average-samples = /bits/ 16 <10>;
+               touchscreen-max-pressure = <255>;
+               touchscreen-average-samples = <10>;
                ti,debounce-tol = /bits/ 16 <3>;
                ti,debounce-rep = /bits/ 16 <1>;
                ti,settle-delay-usec = /bits/ 16 <150>;
index aff857df25cf4c35ec71bad4c896be4e7a5b485c..1df84335925b59204f198d795b3cffc02cb58328 100644 (file)
                                        gpio4 {
                                                pins = "gpio4";
                                                function = "32k-out1";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio5 {
                                                pins = "gpio5";
                                                function = "gpio";
-                                               drive-push-pull = <0>;
+                                               drive-push-pull;
                                        };
 
                                        gpio6 {
                                                pins = "gpio6";
                                                function = "gpio";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio7 {
                                                pins = "gpio7";
                                                function = "gpio";
-                                               drive-push-pull = <0>;
+                                               drive-push-pull;
                                        };
                                };
 
index 4631504c3c7a6456416a1df7757b0a72a0d4362f..1ab132c152bb9e22c3ed1a8d64605e8f693a8df9 100644 (file)
                                        gpio4 {
                                                pins = "gpio4";
                                                function = "32k-out1";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio5 {
                                                pins = "gpio5";
                                                function = "gpio";
-                                               drive-push-pull = <0>;
+                                               drive-push-pull;
                                        };
 
                                        gpio6 {
                                                pins = "gpio6";
                                                function = "gpio";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio7 {
                                                pins = "gpio7";
                                                function = "gpio";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
                                };
 
index a7d7cfd66379f305173a5d81e59c1421f85ae93e..634d0f493c2e8f938d8e63eab4066955389cc061 100644 (file)
                                        gpio4 {
                                                pins = "gpio4";
                                                function = "32k-out1";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio6 {
                                                pins = "gpio6";
                                                function = "gpio";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio7 {
                                                pins = "gpio7";
                                                function = "gpio";
-                                               drive-push-pull = <0>;
+                                               drive-push-pull;
                                        };
                                };
 
index 0bd66f9c620b92f1370e3db4a61116e86f6a57c4..0b219e72765edbfdd683325defa6f0b69cf5945e 100644 (file)
                                        gpio4 {
                                                pins = "gpio4";
                                                function = "32k-out1";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio6 {
                                                pins = "gpio6";
                                                function = "gpio";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio7 {
                                                pins = "gpio7";
                                                function = "gpio";
-                                               drive-push-pull = <0>;
+                                               drive-push-pull;
                                        };
                                };
 
index 75eb743a724270230a5f3f7b18163596b15b914e..0fe772b04bd02242511c89ca808c42083aca344c 100644 (file)
@@ -59,7 +59,7 @@
                                gpio1 {
                                        pins = "gpio1";
                                        function = "fps-out";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                        maxim,active-fps-power-up-slot = <7>;
                                        maxim,active-fps-power-down-slot = <0>;
@@ -68,7 +68,7 @@
                                gpio2_3 {
                                        pins = "gpio2", "gpio3";
                                        function = "fps-out";
-                                       drive-open-drain = <1>;
+                                       drive-open-drain;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                };
 
@@ -80,7 +80,7 @@
                                gpio5_6_7 {
                                        pins = "gpio5", "gpio6", "gpio7";
                                        function = "gpio";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                };
                        };
 
index 10347b6e6e8478f9130b5952b1b8d2a8bde40e6f..936a309e288c3ecf090afeb51e438b40f8d50a48 100644 (file)
                                gpio1 {
                                        pins = "gpio1";
                                        function = "fps-out";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                        maxim,active-fps-power-up-slot = <7>;
                                        maxim,active-fps-power-down-slot = <0>;
                                gpio2 {
                                        pins = "gpio2";
                                        function = "fps-out";
-                                       drive-open-drain = <1>;
+                                       drive-open-drain;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                };
 
                                gpio3 {
                                        pins = "gpio3";
                                        function = "fps-out";
-                                       drive-open-drain = <1>;
+                                       drive-open-drain;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                };
 
                                gpio5_6_7 {
                                        pins = "gpio5", "gpio6", "gpio7";
                                        function = "gpio";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                };
                        };
 
index 72c2dc3c14eac4dc9df481ccbb4cc5b5b5e84f82..f6446120c26725bb905759b08d2e886938e7d715 100644 (file)
                                gpio1 {
                                        pins = "gpio1";
                                        function = "fps-out";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
                                        maxim,active-fps-power-up-slot = <0>;
                                        maxim,active-fps-power-down-slot = <7>;
                                gpio2 {
                                        pins = "gpio2";
                                        function = "fps-out";
-                                       drive-open-drain = <1>;
+                                       drive-open-drain;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                        maxim,active-fps-power-up-slot = <0>;
                                        maxim,active-fps-power-down-slot = <7>;
                                gpio3 {
                                        pins = "gpio3";
                                        function = "fps-out";
-                                       drive-open-drain = <1>;
+                                       drive-open-drain;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                        maxim,active-fps-power-up-slot = <4>;
                                        maxim,active-fps-power-down-slot = <3>;
                                gpio5_6_7 {
                                        pins = "gpio5", "gpio6", "gpio7";
                                        function = "gpio";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                };
                        };
 
index a263d51882ee766ac6061eb6c812a4075458d915..e42384f097d64847652d991acfb2631ce48fd906 100644 (file)
                                gpio3 {
                                        pins = "gpio3";
                                        function = "fps-out";
-                                       drive-open-drain = <1>;
+                                       drive-open-drain;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                        maxim,active-fps-power-up-slot = <4>;
                                        maxim,active-fps-power-down-slot = <2>;
                                gpio5_6 {
                                        pins = "gpio5", "gpio6";
                                        function = "gpio";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                };
 
                                gpio4 {
index f0f81c23c16f2e787a5b2584d18a5400d1858ceb..b9a48cfd760faf2382f6498777563762732a8c55 100644 (file)
                                pins = "gpio47", "gpio48";
                                function = "blsp_i2c3";
                                drive-strength = <16>;
-                               bias-disable = <0>;
+                               bias-disable;
                        };
 
                        blsp1_i2c3_sleep: blsp1-i2c2-sleep {
                                pins = "gpio47", "gpio48";
                                function = "gpio";
                                drive-strength = <2>;
-                               bias-disable = <0>;
+                               bias-disable;
                        };
 
                        blsp2_uart3_4pins_default: blsp2-uart2-4pins {
index e90f99ef532346929a22c7bcc506f56f9cf1a5e0..e47c74e513afddec831849f8dd6cddf099631c39 100644 (file)
@@ -33,7 +33,7 @@ ap_h1_spi: &spi0 {};
 };
 
 &alc5682 {
-       realtek,dmic-clk-driving-high = "true";
+       realtek,dmic-clk-driving-high;
 };
 
 &cpu6_alert0 {
index 1084d5ce9ac7a9cbd0ec6e7702c2122f0ec2aa5e..07b729f9fec5e68958b99c00034eaec9f8e30549 100644 (file)
                        pins = "gpio6", "gpio25", "gpio26";
                        function = "gpio";
                        drive-strength = <8>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
index 8553c8bf79bd40015052c915bbea2dfb5b5c6da0..103cc40816fd3779027c334deb854f1dcd39cf88 100644 (file)
                config {
                        pins = "gpio6", "gpio11";
                        drive-strength = <8>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
index 232b439cbaf3d8511199fa69399ce27d47c336c6..ff8f4511df71f73d671bbb57449e57ed94f3e7d9 100644 (file)
@@ -75,6 +75,7 @@
 #define ARM_CPU_PART_CORTEX_A77                0xD0D
 #define ARM_CPU_PART_NEOVERSE_V1       0xD40
 #define ARM_CPU_PART_CORTEX_A78                0xD41
+#define ARM_CPU_PART_CORTEX_A78AE      0xD42
 #define ARM_CPU_PART_CORTEX_X1         0xD44
 #define ARM_CPU_PART_CORTEX_A510       0xD46
 #define ARM_CPU_PART_CORTEX_A710       0xD47
 #define MIDR_CORTEX_A77        MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
 #define MIDR_NEOVERSE_V1       MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
 #define MIDR_CORTEX_A78        MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
+#define MIDR_CORTEX_A78AE      MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
 #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
 #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
 #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
index 7f3c87f7a0cec758928c9d46dcce97058b0e413e..c31be7eda9df413aaef23c8a59a883efccf0047a 100644 (file)
        isb                                     // Make sure SRE is now set
        mrs_s   x0, SYS_ICC_SRE_EL2             // Read SRE back,
        tbz     x0, #0, .Lskip_gicv3_\@         // and check that it sticks
-       msr_s   SYS_ICH_HCR_EL2, xzr            // Reset ICC_HCR_EL2 to defaults
+       msr_s   SYS_ICH_HCR_EL2, xzr            // Reset ICH_HCR_EL2 to defaults
 .Lskip_gicv3_\@:
 .endm
 
index d62405ce3e6de53c37a94b2d10613f81b7d991e6..7496deab025ad3397780be02e295a41b95d73505 100644 (file)
@@ -43,10 +43,22 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
 
+#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
 static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
        return !(vcpu->arch.hcr_el2 & HCR_RW);
 }
+#else
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+
+       WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED,
+                              &kvm->arch.flags));
+
+       return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
+}
+#endif
 
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
@@ -72,15 +84,14 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
                vcpu->arch.hcr_el2 |= HCR_TVM;
        }
 
-       if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
+       if (vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 &= ~HCR_RW;
-
-       /*
-        * TID3: trap feature register accesses that we virtualise.
-        * For now this is conditional, since no AArch32 feature regs
-        * are currently virtualised.
-        */
-       if (!vcpu_el1_is_32bit(vcpu))
+       else
+               /*
+                * TID3: trap feature register accesses that we virtualise.
+                * For now this is conditional, since no AArch32 feature regs
+                * are currently virtualised.
+                */
                vcpu->arch.hcr_el2 |= HCR_TID3;
 
        if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
index e3b25dc6c367ae8302ea4bd4d1dc3eb673e77b19..94a27a7520f4740e64e202599c11fa75441b4e44 100644 (file)
@@ -127,6 +127,16 @@ struct kvm_arch {
 #define KVM_ARCH_FLAG_MTE_ENABLED                      1
        /* At least one vCPU has run in the VM */
 #define KVM_ARCH_FLAG_HAS_RAN_ONCE                     2
+       /*
+        * The following two bits are used to indicate the guest's EL1
+        * register width configuration. The KVM_ARCH_FLAG_EL1_32BIT bit is
+        * valid only when KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set.
+        * Otherwise, the guest's EL1 register width has not yet been
+        * determined.
+        */
+#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED             3
+#define KVM_ARCH_FLAG_EL1_32BIT                                4
+
        unsigned long flags;
 
        /*
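For readers skimming the hunk above: the two flags form a small protocol in which KVM_ARCH_FLAG_EL1_32BIT is meaningful only once KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED has been set. A minimal user-space sketch of that protocol, with invented bit values and helper names (the kernel's real accessor is the vcpu_el1_is_32bit() pair added in kvm_emulate.h above):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Invented stand-ins for the two kvm_arch flag bits. */
    #define WIDTH_CONFIGURED (1UL << 3)
    #define EL1_32BIT        (1UL << 4)

    /* EL1_32BIT may only be consulted once WIDTH_CONFIGURED is set. */
    static bool el1_is_32bit(unsigned long flags)
    {
            assert(flags & WIDTH_CONFIGURED);
            return flags & EL1_32BIT;
    }

    int main(void)
    {
            unsigned long flags = 0;

            flags |= EL1_32BIT | WIDTH_CONFIGURED;  /* configured exactly once */
            printf("32-bit EL1: %d\n", el1_is_32bit(flags));
            return 0;
    }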
index 3fb79b76e9d96adae5de2f08f3974a4a40ff898e..7bbf5104b7b7bd9985849a774051abb591d07b0d 100644 (file)
@@ -42,7 +42,7 @@ bool alternative_is_applied(u16 cpufeature)
 /*
  * Check if the target PC is within an alternative block.
  */
-static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
+static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
 {
        unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
        return !(pc >= replptr && pc <= (replptr + alt->alt_len));
@@ -50,7 +50,7 @@ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
 
 #define align_down(x, a)       ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
 
-static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
+static __always_inline u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
 {
        u32 insn;
 
@@ -95,7 +95,7 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnp
        return insn;
 }
 
-static void patch_alternative(struct alt_instr *alt,
+static noinstr void patch_alternative(struct alt_instr *alt,
                              __le32 *origptr, __le32 *updptr, int nr_inst)
 {
        __le32 *replptr;
index 3ed39c61a510c7c164073bc1c9431298378e1b1b..2b3f3d0544b9808aa1240df835477dd4991b3f85 100644 (file)
@@ -8,16 +8,9 @@
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
 
-#ifndef VMA_ITERATOR
-#define VMA_ITERATOR(name, mm, addr)   \
-       struct mm_struct *name = mm
-#define for_each_vma(vmi, vma)         \
-       for (vma = vmi->mmap; vma; vma = vma->vm_next)
-#endif
-
-#define for_each_mte_vma(vmi, vma)                                     \
+#define for_each_mte_vma(tsk, vma)                                     \
        if (system_supports_mte())                                      \
-               for_each_vma(vmi, vma)                                  \
+               for (vma = tsk->mm->mmap; vma; vma = vma->vm_next)      \
                        if (vma->vm_flags & VM_MTE)
 
 static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
@@ -32,10 +25,11 @@ static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
 static int mte_dump_tag_range(struct coredump_params *cprm,
                              unsigned long start, unsigned long end)
 {
+       int ret = 1;
        unsigned long addr;
+       void *tags = NULL;
 
        for (addr = start; addr < end; addr += PAGE_SIZE) {
-               char tags[MTE_PAGE_TAG_STORAGE];
                struct page *page = get_dump_page(addr);
 
                /*
@@ -59,22 +53,36 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
                        continue;
                }
 
+               if (!tags) {
+                       tags = mte_allocate_tag_storage();
+                       if (!tags) {
+                               put_page(page);
+                               ret = 0;
+                               break;
+                       }
+               }
+
                mte_save_page_tags(page_address(page), tags);
                put_page(page);
-               if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE))
-                       return 0;
+               if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {
+                       mte_free_tag_storage(tags);
+                       ret = 0;
+                       break;
+               }
        }
 
-       return 1;
+       if (tags)
+               mte_free_tag_storage(tags);
+
+       return ret;
 }
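The shape of the mte_dump_tag_range() fix above, restated as a self-contained sketch (malloc()/free() stand in for mte_allocate_tag_storage()/mte_free_tag_storage(), and the page walk is elided): the scratch buffer is allocated lazily on first use, reused for every page, and released on all exit paths, so the function no longer keeps the tag storage on the stack.

    #include <stdio.h>
    #include <stdlib.h>

    #define TAG_STORAGE 128         /* stand-in for MTE_PAGE_TAG_STORAGE */
    #define PAGE_SZ     4096

    static int emit(const void *tags)   /* stand-in for dump_emit() */
    {
            return tags != NULL;
    }

    static int dump_tag_range(unsigned long start, unsigned long end)
    {
            int ret = 1;
            void *tags = NULL;
            unsigned long addr;

            for (addr = start; addr < end; addr += PAGE_SZ) {
                    if (!tags) {
                            tags = malloc(TAG_STORAGE);  /* first use only */
                            if (!tags) {
                                    ret = 0;
                                    break;
                            }
                    }
                    /* ... save this page's tags into the buffer ... */
                    if (!emit(tags)) {
                            ret = 0;
                            break;
                    }
            }
            free(tags);  /* free(NULL) is a no-op */
            return ret;
    }

    int main(void)
    {
            printf("dumped: %d\n", dump_tag_range(0, 4 * PAGE_SZ));
            return 0;
    }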
 
 Elf_Half elf_core_extra_phdrs(void)
 {
        struct vm_area_struct *vma;
        int vma_count = 0;
-       VMA_ITERATOR(vmi, current->mm, 0);
 
-       for_each_mte_vma(vmi, vma)
+       for_each_mte_vma(current, vma)
                vma_count++;
 
        return vma_count;
@@ -83,9 +91,8 @@ Elf_Half elf_core_extra_phdrs(void)
 int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 {
        struct vm_area_struct *vma;
-       VMA_ITERATOR(vmi, current->mm, 0);
 
-       for_each_mte_vma(vmi, vma) {
+       for_each_mte_vma(current, vma) {
                struct elf_phdr phdr;
 
                phdr.p_type = PT_ARM_MEMTAG_MTE;
@@ -109,9 +116,8 @@ size_t elf_core_extra_data_size(void)
 {
        struct vm_area_struct *vma;
        size_t data_size = 0;
-       VMA_ITERATOR(vmi, current->mm, 0);
 
-       for_each_mte_vma(vmi, vma)
+       for_each_mte_vma(current, vma)
                data_size += mte_vma_tag_dump_size(vma);
 
        return data_size;
@@ -120,9 +126,8 @@ size_t elf_core_extra_data_size(void)
 int elf_core_write_extra_data(struct coredump_params *cprm)
 {
        struct vm_area_struct *vma;
-       VMA_ITERATOR(vmi, current->mm, 0);
 
-       for_each_mte_vma(vmi, vma) {
+       for_each_mte_vma(current, vma) {
                if (vma->vm_flags & VM_DONTDUMP)
                        continue;
 
index 712e97c03e54c287e4f6e9ee0e5d02ee3665a3cf..cd868084e724244d8964c95e6cbf7ba34a0b36f2 100644 (file)
@@ -701,7 +701,7 @@ NOKPROBE_SYMBOL(breakpoint_handler);
  * addresses. There is no straight-forward way, short of disassembling the
  * offending instruction, to map that address back to the watchpoint. This
  * function computes the distance of the memory access from the watchpoint as a
- * heuristic for the likelyhood that a given access triggered the watchpoint.
+ * heuristic for the likelihood that a given access triggered the watchpoint.
  *
  * See Section D2.10.5 "Determining the memory location that caused a Watchpoint
  * exception" of ARMv8 Architecture Reference Manual for details.
index e53493d8b208bb9cc3377ef07c89238f55aef97f..a3d0494f25a91de87c8e15a04fe404d516e6ef2e 100644 (file)
@@ -220,7 +220,7 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
                         * increasing the section's alignment so that the
                         * resulting address of this instruction is guaranteed
                         * to equal the offset in that particular bit (as well
-                        * as all less signficant bits). This ensures that the
+                        * as all less significant bits). This ensures that the
                         * address modulo 4 KB != 0xfff8 or 0xfffc (which would
                         * have all ones in bits [11:3])
                         */
index 771f543464e060729740949d5edf7f63762e43a9..33e0fabc0b79b7ba7794c9b3522d3cb194f65876 100644 (file)
@@ -117,8 +117,8 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
        int i, ret = 0;
        struct aarch64_insn_patch *pp = arg;
 
-       /* The first CPU becomes master */
-       if (atomic_inc_return(&pp->cpu_count) == 1) {
+       /* The last CPU becomes master */
+       if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
                for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
                        ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
                                                             pp->new_insns[i]);
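A rough user-space analogue of the rendezvous this one-line change fixes, with threads standing in for CPUs (all names invented): only when the counter reaches the number of participants is every other CPU known to be spinning inside the callback, so the patcher cannot race a straggler that has not yet entered it.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <threads.h>

    #define NCPUS 4

    static atomic_int checked_in;
    static atomic_bool patched;

    static int cpu_cb(void *arg)
    {
            (void)arg;
            /* The *last* thread to arrive becomes the patcher... */
            if (atomic_fetch_add(&checked_in, 1) + 1 == NCPUS) {
                    puts("all CPUs parked; patching text");
                    atomic_store(&patched, true);
            } else {
                    /* ...everyone else is guaranteed to be spinning here. */
                    while (!atomic_load(&patched))
                            thrd_yield();
            }
            return 0;
    }

    int main(void)
    {
            thrd_t t[NCPUS];

            for (int i = 0; i < NCPUS; i++)
                    thrd_create(&t[i], cpu_cb, NULL);
            for (int i = 0; i < NCPUS; i++)
                    thrd_join(t[i], NULL);
            return 0;
    }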
index 5777929d35bf47664ec9f8efd6598fb9bb17cf7e..40be3a7c2c53154a17be367d56badf57b9e1c48a 100644 (file)
@@ -853,6 +853,7 @@ u8 spectre_bhb_loop_affected(int scope)
        if (scope == SCOPE_LOCAL_CPU) {
                static const struct midr_range spectre_bhb_k32_list[] = {
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
index 27df5c1e6baad53098e8cf1740cc054c2dea5d89..3b46041f2b978893eae596fdf84bb6aec6074080 100644 (file)
@@ -234,6 +234,7 @@ asmlinkage notrace void secondary_start_kernel(void)
         * Log the CPU info before it is marked online and might get read.
         */
        cpuinfo_store_cpu();
+       store_cpu_topology(cpu);
 
        /*
         * Enable GIC and timers.
@@ -242,7 +243,6 @@ asmlinkage notrace void secondary_start_kernel(void)
 
        ipi_setup(cpu);
 
-       store_cpu_topology(cpu);
        numa_add_cpu(cpu);
 
        /*
index 19ee7c33769d3f5820762cf0b6b497963bdb6bca..2b0887e58a7c4df64cf6cedf7968fd305cf99283 100644 (file)
@@ -140,7 +140,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
        /*
         * Restore pstate flags. OS lock and mdscr have been already
         * restored, so from this point onwards, debugging is fully
-        * renabled if it was enabled when core started shutdown.
+        * reenabled if it was enabled when core started shutdown.
         */
        local_daif_restore(flags);
 
index 0d19259454d8c583d184ec6ab21cda3f43a19cf0..53ae2c0640bc2ab1458b26863c6ba7388fbf635a 100644 (file)
@@ -1079,7 +1079,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        gfn_t gfn;
        kvm_pfn_t pfn;
        bool logging_active = memslot_is_logging(memslot);
-       bool logging_perm_fault = false;
+       bool use_read_lock = false;
        unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
        unsigned long vma_pagesize, fault_granule;
        enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
@@ -1114,7 +1114,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (logging_active) {
                force_pte = true;
                vma_shift = PAGE_SHIFT;
-               logging_perm_fault = (fault_status == FSC_PERM && write_fault);
+               use_read_lock = (fault_status == FSC_PERM && write_fault &&
+                                fault_granule == PAGE_SIZE);
        } else {
                vma_shift = get_vma_page_shift(vma, hva);
        }
@@ -1218,7 +1219,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * logging, only acquire read lock for permission
         * relaxation.
         */
-       if (logging_perm_fault)
+       if (use_read_lock)
                read_lock(&kvm->mmu_lock);
        else
                write_lock(&kvm->mmu_lock);
@@ -1268,6 +1269,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
                ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
        } else {
+               WARN_ONCE(use_read_lock, "Attempted stage-2 map outside of write lock\n");
+
                ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
                                             __pfn_to_phys(pfn), prot,
                                             memcache);
@@ -1280,7 +1283,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        }
 
 out_unlock:
-       if (logging_perm_fault)
+       if (use_read_lock)
                read_unlock(&kvm->mmu_lock);
        else
                write_unlock(&kvm->mmu_lock);
index 372da09a2fab62f824ff016c7ac69787a33d359e..baac2b405f2356418d56be0ea1ac1c845e4c6675 100644 (file)
@@ -215,15 +215,11 @@ static void kvm_psci_narrow_to_32bit(struct kvm_vcpu *vcpu)
 
 static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32 fn)
 {
-       switch(fn) {
-       case PSCI_0_2_FN64_CPU_SUSPEND:
-       case PSCI_0_2_FN64_CPU_ON:
-       case PSCI_0_2_FN64_AFFINITY_INFO:
-               /* Disallow these functions for 32bit guests */
-               if (vcpu_mode_is_32bit(vcpu))
-                       return PSCI_RET_NOT_SUPPORTED;
-               break;
-       }
+       /*
+        * Prevent 32-bit guests from calling 64-bit PSCI functions.
+        */
+       if ((fn & PSCI_0_2_64BIT) && vcpu_mode_is_32bit(vcpu))
+               return PSCI_RET_NOT_SUPPORTED;
 
        return 0;
 }
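Background for the simplification above: PSCI encodes the calling convention in bit 30 of the function ID (PSCI_0_2_64BIT in include/uapi/linux/psci.h), so one mask test covers every SMC64 function instead of the old per-function switch. A small demonstration with the two CPU_ON variants:

    #include <stdint.h>
    #include <stdio.h>

    #define PSCI_0_2_64BIT       0x40000000u  /* bit 30: SMC64 convention */
    #define PSCI_0_2_FN_CPU_ON   0x84000003u  /* SMC32 CPU_ON */
    #define PSCI_0_2_FN64_CPU_ON 0xc4000003u  /* SMC64 CPU_ON */

    int main(void)
    {
            const uint32_t fns[] = { PSCI_0_2_FN_CPU_ON, PSCI_0_2_FN64_CPU_ON };

            for (unsigned int i = 0; i < 2; i++)
                    printf("0x%08x: %s\n", fns[i],
                           (fns[i] & PSCI_0_2_64BIT) ?
                           "SMC64 - rejected for a 32-bit guest" :
                           "SMC32 - allowed");
            return 0;
    }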
@@ -235,10 +231,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
        unsigned long val;
        int ret = 1;
 
-       val = kvm_psci_check_allowed_function(vcpu, psci_fn);
-       if (val)
-               goto out;
-
        switch (psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                /*
@@ -306,7 +298,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
                break;
        }
 
-out:
        smccc_set_retval(vcpu, val, 0, 0, 0);
        return ret;
 }
@@ -318,9 +309,6 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
        unsigned long val;
        int ret = 1;
 
-       if (minor > 1)
-               return -EINVAL;
-
        switch(psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                val = minor == 0 ? KVM_ARM_PSCI_1_0 : KVM_ARM_PSCI_1_1;
@@ -426,6 +414,15 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
  */
 int kvm_psci_call(struct kvm_vcpu *vcpu)
 {
+       u32 psci_fn = smccc_get_function(vcpu);
+       unsigned long val;
+
+       val = kvm_psci_check_allowed_function(vcpu, psci_fn);
+       if (val) {
+               smccc_set_retval(vcpu, val, 0, 0, 0);
+               return 1;
+       }
+
        switch (kvm_psci_version(vcpu)) {
        case KVM_ARM_PSCI_1_1:
                return kvm_psci_1_x_call(vcpu, 1);
index ecc40c8cd6f643301c4c3a76f9eb6933ca8350e7..6c70c6f61c7038fb180a6c4e5b8c3c943c8b9a55 100644 (file)
@@ -181,27 +181,51 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+/**
+ * kvm_set_vm_width() - set the register width for the guest
+ * @vcpu: Pointer to the vcpu being configured
+ *
+ * Set both KVM_ARCH_FLAG_EL1_32BIT and KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED
+ * in the VM flags based on the vcpu's requested register width, the HW
+ * capabilities and other options (such as MTE).
+ * When REG_WIDTH_CONFIGURED is already set, the vcpu settings must be
+ * consistent with the value of the FLAG_EL1_32BIT bit in the flags.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
 {
-       struct kvm_vcpu *tmp;
+       struct kvm *kvm = vcpu->kvm;
        bool is32bit;
-       unsigned long i;
 
        is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
+
+       lockdep_assert_held(&kvm->lock);
+
+       if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
+               /*
+                * The guest's register width is already configured.
+                * Make sure that the vcpu is consistent with it.
+                */
+               if (is32bit == test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags))
+                       return 0;
+
+               return -EINVAL;
+       }
+
        if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
-               return false;
+               return -EINVAL;
 
        /* MTE is incompatible with AArch32 */
-       if (kvm_has_mte(vcpu->kvm) && is32bit)
-               return false;
+       if (kvm_has_mte(kvm) && is32bit)
+               return -EINVAL;
 
-       /* Check that the vcpus are either all 32bit or all 64bit */
-       kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
-               if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
-                       return false;
-       }
+       if (is32bit)
+               set_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
 
-       return true;
+       set_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags);
+
+       return 0;
 }
 
 /**
@@ -230,10 +254,16 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
        u32 pstate;
 
        mutex_lock(&vcpu->kvm->lock);
-       reset_state = vcpu->arch.reset_state;
-       WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+       ret = kvm_set_vm_width(vcpu);
+       if (!ret) {
+               reset_state = vcpu->arch.reset_state;
+               WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+       }
        mutex_unlock(&vcpu->kvm->lock);
 
+       if (ret)
+               return ret;
+
        /* Reset PMU outside of the non-preemptible section */
        kvm_pmu_vcpu_reset(vcpu);
 
@@ -260,14 +290,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
                }
        }
 
-       if (!vcpu_allowed_register_width(vcpu)) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        switch (vcpu->arch.target) {
        default:
-               if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
+               if (vcpu_el1_is_32bit(vcpu)) {
                        pstate = VCPU_RESET_PSTATE_SVC;
                } else {
                        pstate = VCPU_RESET_PSTATE_EL1;
index f38c40a762519f2d10b6fb8b9f26b3294b78f39d..78cde687383ca8cd1c6396701deb0dfc552bddf5 100644 (file)
@@ -82,7 +82,7 @@ static bool end_of_vgic(struct vgic_state_iter *iter)
 
 static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
 {
-       struct kvm *kvm = (struct kvm *)s->private;
+       struct kvm *kvm = s->private;
        struct vgic_state_iter *iter;
 
        mutex_lock(&kvm->lock);
@@ -110,7 +110,7 @@ out:
 
 static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
 {
-       struct kvm *kvm = (struct kvm *)s->private;
+       struct kvm *kvm = s->private;
        struct vgic_state_iter *iter = kvm->arch.vgic.iter;
 
        ++*pos;
@@ -122,7 +122,7 @@ static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
 
 static void vgic_debug_stop(struct seq_file *s, void *v)
 {
-       struct kvm *kvm = (struct kvm *)s->private;
+       struct kvm *kvm = s->private;
        struct vgic_state_iter *iter;
 
        /*
@@ -229,8 +229,8 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
 
 static int vgic_debug_show(struct seq_file *s, void *v)
 {
-       struct kvm *kvm = (struct kvm *)s->private;
-       struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
+       struct kvm *kvm = s->private;
+       struct vgic_state_iter *iter = v;
        struct vgic_irq *irq;
        struct kvm_vcpu *vcpu = NULL;
        unsigned long flags;
index 089fc2ffcb43d1c6c1269036762edf669068ad24..2e13402be3bd24654962c3eccb0c0ab56d5cd420 100644 (file)
@@ -2143,7 +2143,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
 static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
                                void *ptr, void *opaque)
 {
-       struct its_device *dev = (struct its_device *)opaque;
+       struct its_device *dev = opaque;
        struct its_collection *collection;
        struct kvm *kvm = its->dev->kvm;
        struct kvm_vcpu *vcpu = NULL;
index 8ac25f19084e89891d9629cfcf8f7d82c0464584..1e7b1550e2fcebbea89f1443cb8daa5f009b4498 100644 (file)
@@ -73,7 +73,7 @@ EXPORT_SYMBOL(memstart_addr);
  * In this scheme a comparatively quicker boot is observed.
  *
  * If ZONE_DMA configs are defined, crash kernel memory reservation
- * is delayed until DMA zone memory range size initilazation performed in
+ * is delayed until DMA zone memory range size initialization is performed in
  * zone_sizes_init().  The defer is necessary to steer clear of DMA zone
  * memory range to avoid overlap allocation.  So crash kernel memory boundaries
  * are not known when mapping all bank memory ranges, which otherwise means
@@ -81,7 +81,7 @@ EXPORT_SYMBOL(memstart_addr);
  * so page-granularity mappings are created for the entire memory range.
  * Hence a slightly slower boot is observed.
  *
- * Note: Page-granularity mapppings are necessary for crash kernel memory
+ * Note: Page-granularity mappings are necessary for crash kernel memory
  * range for shrinking its size via /sys/kernel/kexec_crash_size interface.
  */
 #if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)
index 2298909f24c633da17559f65dc3e0ede2e677b88..161653d84b340de9df12956e232287f6d9c11dd3 100644 (file)
@@ -67,9 +67,5 @@ struct user {
        unsigned long magic;            /* To uniquely identify a core file */
        char u_comm[32];                /* User command that was responsible */
 };
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
 
 #endif
index 0ba486651b7cb0b4a3ce362ece2cb701bd559665..ec03d3ab87152fe40c1191b3441f055ac8acb5f9 100644 (file)
@@ -50,10 +50,4 @@ struct user {
        char            u_comm[32];             /* user command name */
 };
 
-#define NBPG                   PAGE_SIZE
-#define UPAGES                 1
-#define HOST_TEXT_START_ADDR   (u.start_code)
-#define HOST_DATA_START_ADDR   (u.start_data)
-#define HOST_STACK_END_ADDR    (u.start_stack + u.u_ssize * NBPG)
-
 #endif /* _ASM_IA64_USER_H */
index 14f40ecf8b65714d822a0b2a297ea35980d555f0..d009f927a0482bc9b98266d5113f60ad94068c5a 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 509d555977c8adae9cc11c0d6d051d1170ff5e58..61413bff613a686d0c830cdfe3f32dd8855798c8 100644 (file)
@@ -79,9 +79,5 @@ struct user{
   unsigned long magic;         /* To uniquely identify a core file */
   char u_comm[32];             /* User command that was responsible */
 };
-#define NBPG 4096
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
 
 #endif
index 6713c65a25e15cfe96824cbcdba6bb8f222ef274..b265e4bc16c2e0bc52c63819fda704ebda00de31 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index cff570a719461ffd34ffec2e93d0ddaea0776de6..2b42c370d57427839d394c475bb6ec8a4471726f 100644 (file)
@@ -29,7 +29,7 @@ $(obj)/simpleImage.$(DTB).ub: $(obj)/simpleImage.$(DTB) FORCE
        $(call if_changed,uimage)
 
 $(obj)/simpleImage.$(DTB).unstrip: vmlinux FORCE
-       $(call if_changed,shipped)
+       $(call if_changed,copy)
 
 $(obj)/simpleImage.$(DTB).strip: vmlinux FORCE
        $(call if_changed,strip)
index ef00dd30d19a2cf74720e27aabe7d15a46368a06..b84e2cbb20eef0a711a688f4d3249ce206368e94 100644 (file)
@@ -12,7 +12,7 @@ $(obj)/linked_dtb.o: $(obj)/system.dtb
 # Generate system.dtb from $(DTB).dtb
 ifneq ($(DTB),system)
 $(obj)/system.dtb: $(obj)/$(DTB).dtb
-       $(call if_changed,shipped)
+       $(call if_changed,copy)
 endif
 endif
 
index 6713c65a25e15cfe96824cbcdba6bb8f222ef274..b265e4bc16c2e0bc52c63819fda704ebda00de31 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 0a03529cf3178d36abf06d6a9355f3f0ca066eba..3e4f5ba104f89a42fddd04a70e85f947d9ba80d4 100644 (file)
@@ -28,7 +28,7 @@ enum crc_type {
 };
 
 #ifndef TOOLCHAIN_SUPPORTS_CRC
-#define _ASM_MACRO_CRC32(OP, SZ, TYPE)                                   \
+#define _ASM_SET_CRC(OP, SZ, TYPE)                                       \
 _ASM_MACRO_3R(OP, rt, rs, rt2,                                           \
        ".ifnc  \\rt, \\rt2\n\t"                                          \
        ".error \"invalid operands \\\"" #OP " \\rt,\\rs,\\rt2\\\"\"\n\t" \
@@ -37,30 +37,36 @@ _ASM_MACRO_3R(OP, rt, rs, rt2,                                                \
                          ((SZ) <<  6) | ((TYPE) << 8))                   \
        _ASM_INSN32_IF_MM(0x00000030 | (__rs << 16) | (__rt << 21) |      \
                          ((SZ) << 14) | ((TYPE) << 3)))
-_ASM_MACRO_CRC32(crc32b,  0, 0);
-_ASM_MACRO_CRC32(crc32h,  1, 0);
-_ASM_MACRO_CRC32(crc32w,  2, 0);
-_ASM_MACRO_CRC32(crc32d,  3, 0);
-_ASM_MACRO_CRC32(crc32cb, 0, 1);
-_ASM_MACRO_CRC32(crc32ch, 1, 1);
-_ASM_MACRO_CRC32(crc32cw, 2, 1);
-_ASM_MACRO_CRC32(crc32cd, 3, 1);
-#define _ASM_SET_CRC ""
+#define _ASM_UNSET_CRC(op, SZ, TYPE) ".purgem " #op "\n\t"
 #else /* !TOOLCHAIN_SUPPORTS_CRC */
-#define _ASM_SET_CRC ".set\tcrc\n\t"
+#define _ASM_SET_CRC(op, SZ, TYPE) ".set\tcrc\n\t"
+#define _ASM_UNSET_CRC(op, SZ, TYPE)
 #endif
 
-#define _CRC32(crc, value, size, type)         \
-do {                                           \
-       __asm__ __volatile__(                   \
-               ".set   push\n\t"               \
-               _ASM_SET_CRC                    \
-               #type #size "   %0, %1, %0\n\t" \
-               ".set   pop"                    \
-               : "+r" (crc)                    \
-               : "r" (value));                 \
+#define __CRC32(crc, value, op, SZ, TYPE)              \
+do {                                                   \
+       __asm__ __volatile__(                           \
+               ".set   push\n\t"                       \
+               _ASM_SET_CRC(op, SZ, TYPE)              \
+               #op "   %0, %1, %0\n\t"                 \
+               _ASM_UNSET_CRC(op, SZ, TYPE)            \
+               ".set   pop"                            \
+               : "+r" (crc)                            \
+               : "r" (value));                         \
 } while (0)
 
+#define _CRC32_crc32b(crc, value)      __CRC32(crc, value, crc32b, 0, 0)
+#define _CRC32_crc32h(crc, value)      __CRC32(crc, value, crc32h, 1, 0)
+#define _CRC32_crc32w(crc, value)      __CRC32(crc, value, crc32w, 2, 0)
+#define _CRC32_crc32d(crc, value)      __CRC32(crc, value, crc32d, 3, 0)
+#define _CRC32_crc32cb(crc, value)     __CRC32(crc, value, crc32cb, 0, 1)
+#define _CRC32_crc32ch(crc, value)     __CRC32(crc, value, crc32ch, 1, 1)
+#define _CRC32_crc32cw(crc, value)     __CRC32(crc, value, crc32cw, 2, 1)
+#define _CRC32_crc32cd(crc, value)     __CRC32(crc, value, crc32cd, 3, 1)
+
+#define _CRC32(crc, value, size, op) \
+       _CRC32_##op##size(crc, value)
+
 #define CRC32(crc, value, size) \
        _CRC32(crc, value, size, crc32)
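The rework above dispatches on a pasted macro name, so each op/size variant owns a complete asm body and the .set crc / .purgem bracketing stays balanced per expansion. A host-side sketch of the same token-pasting dispatch, with printf() standing in for the inline asm:

    #include <stdio.h>

    /* One fully expanded body per op/size variant, as in the new _CRC32_* set. */
    #define _DEMO_crc32w(crc, val)  printf("crc32w  %u, %u\n", crc, val)
    #define _DEMO_crc32cb(crc, val) printf("crc32cb %u, %u\n", crc, val)

    /* Paste op and size into the variant name, like _CRC32(crc, value, size, op). */
    #define DEMO(crc, val, size, op) _DEMO_##op##size(crc, val)

    int main(void)
    {
            DEMO(1u, 2u, w, crc32);   /* -> _DEMO_crc32w(1u, 2u)  */
            DEMO(3u, 4u, b, crc32c);  /* -> _DEMO_crc32cb(3u, 4u) */
            return 0;
    }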
 
index 34d179ca020ba2a5bee5a43790eeb5fccb487548..dd9d4b026e6239bcc3c3344de2fc1c39d971406d 100644 (file)
 #define DEV3TC         0x01003C
 #define BTCS           0x010040
 #define BTCOMPARE      0x010044
-#define GPIOBASE       0x050000
-/* Offsets relative to GPIOBASE */
-#define GPIOFUNC       0x00
-#define GPIOCFG                0x04
-#define GPIOD          0x08
-#define GPIOILEVEL     0x0C
-#define GPIOISTAT      0x10
-#define GPIONMIEN      0x14
-#define IMASK6         0x38
 #define LO_WPX         (1 << 0)
 #define LO_ALE         (1 << 1)
 #define LO_CLE         (1 << 2)
index 10bf90dc02c0fff3195ab48cc68e559feb187f01..e6b21de65cca428308eb08cca193714388f7598e 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syshdr := $(srctree)/scripts/syscallhdr.sh
 sysnr := $(srctree)/$(src)/syscallnr.sh
index 64726c670ca6440030520cc798efbb0d8e408357..5204fc6d6d502faf97c230b429d62dcd8d61592a 100644 (file)
@@ -167,6 +167,8 @@ static inline void clkdev_add_sys(const char *dev, unsigned int module,
 {
        struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
 
+       if (!clk)
+               return;
        clk->cl.dev_id = dev;
        clk->cl.con_id = NULL;
        clk->cl.clk = clk;
index 3d5683e75cf1e67746e5f8e463da34c5844b5bda..200fe9ff641d67fb256815a54af57be7ea38e0a6 100644 (file)
@@ -122,6 +122,8 @@ static inline void clkdev_add_gptu(struct device *dev, const char *con,
 {
        struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
 
+       if (!clk)
+               return;
        clk->cl.dev_id = dev_name(dev);
        clk->cl.con_id = con;
        clk->cl.clk = clk;
index 917fac1636b7151d69c54045307cec7c32a17b8e..084f6caba5f23754bf5f2b892a87c17d325ae720 100644 (file)
@@ -315,6 +315,8 @@ static void clkdev_add_pmu(const char *dev, const char *con, bool deactivate,
 {
        struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
 
+       if (!clk)
+               return;
        clk->cl.dev_id = dev;
        clk->cl.con_id = con;
        clk->cl.clk = clk;
@@ -338,6 +340,8 @@ static void clkdev_add_cgu(const char *dev, const char *con,
 {
        struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
 
+       if (!clk)
+               return;
        clk->cl.dev_id = dev;
        clk->cl.con_id = con;
        clk->cl.clk = clk;
@@ -356,24 +360,28 @@ static void clkdev_add_pci(void)
        struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);
 
        /* main pci clock */
-       clk->cl.dev_id = "17000000.pci";
-       clk->cl.con_id = NULL;
-       clk->cl.clk = clk;
-       clk->rate = CLOCK_33M;
-       clk->rates = valid_pci_rates;
-       clk->enable = pci_enable;
-       clk->disable = pmu_disable;
-       clk->module = 0;
-       clk->bits = PMU_PCI;
-       clkdev_add(&clk->cl);
+       if (clk) {
+               clk->cl.dev_id = "17000000.pci";
+               clk->cl.con_id = NULL;
+               clk->cl.clk = clk;
+               clk->rate = CLOCK_33M;
+               clk->rates = valid_pci_rates;
+               clk->enable = pci_enable;
+               clk->disable = pmu_disable;
+               clk->module = 0;
+               clk->bits = PMU_PCI;
+               clkdev_add(&clk->cl);
+       }
 
        /* use internal/external bus clock */
-       clk_ext->cl.dev_id = "17000000.pci";
-       clk_ext->cl.con_id = "external";
-       clk_ext->cl.clk = clk_ext;
-       clk_ext->enable = pci_ext_enable;
-       clk_ext->disable = pci_ext_disable;
-       clkdev_add(&clk_ext->cl);
+       if (clk_ext) {
+               clk_ext->cl.dev_id = "17000000.pci";
+               clk_ext->cl.con_id = "external";
+               clk_ext->cl.clk = clk_ext;
+               clk_ext->enable = pci_ext_enable;
+               clk_ext->disable = pci_ext_disable;
+               clkdev_add(&clk_ext->cl);
+       }
 }
 
 /* xway socs can generate clocks on gpio pins */
@@ -393,9 +401,15 @@ static void clkdev_add_clkout(void)
                char *name;
 
                name = kzalloc(sizeof("clkout0"), GFP_KERNEL);
+               if (!name)
+                       continue;
                sprintf(name, "clkout%d", i);
 
                clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+               if (!clk) {
+                       kfree(name);
+                       continue;
+               }
                clk->cl.dev_id = "1f103000.cgu";
                clk->cl.con_id = name;
                clk->cl.clk = clk;
index 94f02ada4082622d72dac89e2d89463217be6f20..29c21b9d42dabf55aadf7f57f2ced10b84a6d746 100644 (file)
 #include <asm/mach-rc32434/rb.h>
 #include <asm/mach-rc32434/gpio.h>
 
+#define GPIOBASE       0x050000
+/* Offsets relative to GPIOBASE */
+#define GPIOFUNC       0x00
+#define GPIOCFG                0x04
+#define GPIOD          0x08
+#define GPIOILEVEL     0x0C
+#define GPIOISTAT      0x10
+#define GPIONMIEN      0x14
+#define IMASK6         0x38
+
 struct rb532_gpio_chip {
        struct gpio_chip chip;
        void __iomem     *regbase;
index dfc52f661ad068d065c2ca08992c3d929db5d9cd..38d12f417e4821c1de4bd0ae29d663c638fe65ad 100644 (file)
@@ -363,6 +363,8 @@ static void ip22_check_gio(int slotno, unsigned long addr, int irq)
                printk(KERN_INFO "GIO: slot %d : %s (id %x)\n",
                       slotno, name, id);
                gio_dev = kzalloc(sizeof *gio_dev, GFP_KERNEL);
+               if (!gio_dev)
+                       return;
                gio_dev->name = name;
                gio_dev->slotno = slotno;
                gio_dev->id.id = id;
index d63f18dd058d8cc81c18b37e82dd26fa8700e7b9..8440c16dfb225b3f30d668c3c855d1c015fbd259 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 827038a33064b9b5703d617bdb23dc8cf1e6b04e..4def2bd17b9b865fc410866876fb5e43860afb1f 100644 (file)
 #include <asm/ppc-opcode.h>
 #include <asm/pte-walk.h>
 
-#ifdef CONFIG_PPC_PSERIES
-static inline bool kvmhv_on_pseries(void)
-{
-       return !cpu_has_feature(CPU_FTR_HVMODE);
-}
-#else
-static inline bool kvmhv_on_pseries(void)
-{
-       return false;
-}
-#endif
-
 /*
  * Structure for a nested guest, that is, for a guest that is managed by
  * one of our guests.
index c583d0c37f319247471811960d9b6e534ace6b23..838d4cb460b7edf6b9a820a509e763e63298b0fe 100644 (file)
@@ -586,6 +586,18 @@ static inline bool kvm_hv_mode_active(void)                { return false; }
 
 #endif
 
+#ifdef CONFIG_PPC_PSERIES
+static inline bool kvmhv_on_pseries(void)
+{
+       return !cpu_has_feature(CPU_FTR_HVMODE);
+}
+#else
+static inline bool kvmhv_on_pseries(void)
+{
+       return false;
+}
+#endif
+
 #ifdef CONFIG_KVM_XICS
 static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
 {
index 254687258f42b1673d4becc5ff213a32db18bb24..f2c5c26869f1a43888681bdee0633aab3cbf3784 100644 (file)
@@ -132,7 +132,11 @@ static inline bool pfn_valid(unsigned long pfn)
 #define virt_to_page(kaddr)    pfn_to_page(virt_to_pfn(kaddr))
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
 
-#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
+#define virt_addr_valid(vaddr) ({                                      \
+       unsigned long _addr = (unsigned long)vaddr;                     \
+       _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory &&   \
+       pfn_valid(virt_to_pfn(_addr));                                  \
+})
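
The old one-liner fed any kernel pointer straight into virt_to_pfn(), so addresses outside the linear map (vmalloc space, stray pointers) could still compute to a valid pfn and be reported as valid. Bounding the check to PAGE_OFFSET <= addr < high_memory first restricts virt_addr_valid() to the linear mapping, the only region where that pfn arithmetic is meaningful.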
 
 /*
  * On Book-E parts we need __va to parse the device tree and we can't
index 049ca26893e61033f3319b632ae3f4410fd2fe0d..8fa37ef5da4d177cc80e9c425a16aec31edb5a88 100644 (file)
@@ -28,11 +28,13 @@ void setup_panic(void);
 #define ARCH_PANIC_TIMEOUT 180
 
 #ifdef CONFIG_PPC_PSERIES
+extern bool pseries_reloc_on_exception(void);
 extern bool pseries_enable_reloc_on_exc(void);
 extern void pseries_disable_reloc_on_exc(void);
 extern void pseries_big_endian_exceptions(void);
 void __init pseries_little_endian_exceptions(void);
 #else
+static inline bool pseries_reloc_on_exception(void) { return false; }
 static inline bool pseries_enable_reloc_on_exc(void) { return false; }
 static inline void pseries_disable_reloc_on_exc(void) {}
 static inline void pseries_big_endian_exceptions(void) {}
index 0a0bc79bd1fa951337aa6ecbf5ba8ef705507e4d..de1018cc522b376fe958f561f26305c3a98fb2c4 100644 (file)
@@ -24,5 +24,6 @@
 
 #define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func)      __PPC_SCT(name, "b " #func)
 #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)       __PPC_SCT(name, "blr")
+#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)       __PPC_SCT(name, "b .+20")
 
 #endif /* _ASM_POWERPC_STATIC_CALL_H */
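
The magic constant works because a __PPC_SCT() trampoline is laid out as the patched branch slot (4 bytes) followed by a four-instruction indirect-branch sequence (16 bytes) and then a `li r3,0; blr` tail, so `b .+20` jumps from the slot straight to the return-zero tail. Those offsets follow from the trampoline definition elsewhere in static_call.h, which this hunk does not show, so treat them as context rather than part of the diff.
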
index 99443b8594e7e7fec9ec1e4384bcf25193debd48..7fae7e597ba4922b5636bf852aad7a8356c257dd 100644 (file)
@@ -44,9 +44,4 @@ struct user {
        char            u_comm[32];             /* user command name */
 };
 
-#define NBPG                   PAGE_SIZE
-#define UPAGES                 1
-#define HOST_TEXT_START_ADDR   (u.start_code)
-#define HOST_DATA_START_ADDR   (u.start_data)
-#define HOST_STACK_END_ADDR    (u.start_stack + u.u_ssize * NBPG)
 #endif /* _ASM_POWERPC_USER_H */
index 55caeee37c087190bee29126242ca79813d92bb0..b66dd6f775a4079f078b97b7d91f02e47b328298 100644 (file)
@@ -809,6 +809,10 @@ __start_interrupts:
  * - MSR_EE|MSR_RI is clear (no reentrant exceptions)
  * - Standard kernel environment is set up (stack, paca, etc)
  *
+ * KVM:
+ * These interrupts do not elevate HV 0->1, so HV is not involved. PR KVM
+ * ensures that FSCR[SCV] is disabled whenever it has to force AIL off.
+ *
  * Call convention:
  *
  * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
index 40a583e9d3c70b02e630c927c14d5417d7773976..97a76a8619fbd47bac406a3f23830ce04a13723a 100644 (file)
@@ -101,7 +101,7 @@ __module_alloc(unsigned long size, unsigned long start, unsigned long end, bool
         * too.
         */
        return __vmalloc_node_range(size, 1, start, end, gfp, prot,
-                                   VM_FLUSH_RESET_PERMS | VM_NO_HUGE_VMAP,
+                                   VM_FLUSH_RESET_PERMS,
                                    NUMA_NO_NODE, __builtin_return_address(0));
 }
 
index e547066a06aa6860bf39f5404acdef820af4632f..a96f05063bc9108da0770877ad3b2b86291f8c90 100644 (file)
@@ -196,6 +196,34 @@ static void __init configure_exceptions(void)
 
        /* Under a PAPR hypervisor, we need hypercalls */
        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+               /*
+                * - PR KVM does not support AIL mode interrupts in the host
+                *   while a PR guest is running.
+                *
+                * - SCV system call interrupt vectors are only implemented for
+                *   AIL mode interrupts.
+                *
+                * - On pseries, AIL mode can only be enabled and disabled
+                *   system-wide so when a PR VM is created on a pseries host,
+                *   all CPUs of the host are set to AIL=0 mode.
+                *
+                * - Therefore host CPUs must not execute scv while a PR VM
+                *   exists.
+                *
+                * - SCV support can not be disabled dynamically because the
+                *   feature is advertised to host userspace. Disabling the
+                *   facility and emulating it would be possible but is not
+                *   implemented.
+                *
+                * - So SCV support is blanket disabled if PR KVM could possibly
+                *   run. That is, PR support compiled in, booting on pseries
+                *   with hash MMU.
+                */
+               if (IS_ENABLED(CONFIG_KVM_BOOK3S_PR_POSSIBLE) && !radix_enabled()) {
+                       init_task.thread.fscr &= ~FSCR_SCV;
+                       cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_SCV;
+               }
+
                /* Enable AIL if possible */
                if (!pseries_enable_reloc_on_exc()) {
                        init_task.thread.fscr &= ~FSCR_SCV;
index 5476f62eb80f96ab77933e183c4f370c5b24d3d5..9d7bd81510b833041ae984405dc6100f06bc8a48 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 18e58085447cb3168ef519839db8868f1f475792..ddd88179110a096f3cb06ec1b92aed6715754001 100644 (file)
@@ -112,12 +112,21 @@ config KVM_BOOK3S_64_PR
          guest in user mode (problem state) and emulating all
          privileged instructions and registers.
 
+         This is only available for hash MMU mode and only supports
+         guests that use hash MMU mode.
+
          This is not as fast as using hypervisor mode, but works on
          machines where hypervisor mode is not available or not usable,
          and can emulate processors that are different from the host
          processor, including emulating 32-bit processors on a 64-bit
          host.
 
+         Selecting this option will cause the SCV facility to be
+         disabled when the kernel is booted on the pseries platform in
+         hash MMU mode (regardless of PR VMs running). When any PR VMs
+         are running, "AIL" mode is disabled which may slow interrupts
+         and system calls on the host.
+
 config KVM_BOOK3S_HV_EXIT_TIMING
        bool "Detailed timing for hypervisor real-mode code"
        depends on KVM_BOOK3S_HV_POSSIBLE && DEBUG_FS
index 05e003eb5d9063855f412b08bc519e9fa679f6c8..e42d1c609e4767f04318f2b586b88045c6e0acaa 100644 (file)
@@ -414,10 +414,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
         */
        ld      r10,HSTATE_SCRATCH0(r13)
        cmpwi   r10,BOOK3S_INTERRUPT_MACHINE_CHECK
-       beq     machine_check_common
+       beq     .Lcall_machine_check_common
 
        cmpwi   r10,BOOK3S_INTERRUPT_SYSTEM_RESET
-       beq     system_reset_common
+       beq     .Lcall_system_reset_common
 
        b       .
+
+.Lcall_machine_check_common:
+       b       machine_check_common
+
+.Lcall_system_reset_common:
+       b       system_reset_common
 #endif
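
The detour through local stubs is about branch reach: a conditional `beq` encodes only a 16-bit displacement (±32 KiB), while an unconditional `b` carries 26 bits (±32 MiB), so the conditional branch targets a nearby `.Lcall_*` stub that can in turn reach the possibly-distant common handlers.
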
index c886557638a15b16c53df54de0a9b8a5754ffa7a..6fa518f6501d513076200e994c25ef4cbd594a35 100644 (file)
@@ -225,6 +225,13 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
        int cpu;
        struct rcuwait *waitp;
 
+       /*
+        * rcuwait_wake_up contains smp_mb() which orders prior stores that
+        * create pending work vs below loads of cpu fields. The other side
+        * is the barrier in vcpu run that orders setting the cpu fields vs
+        * testing for pending work.
+        */
+
        waitp = kvm_arch_vcpu_get_wait(vcpu);
        if (rcuwait_wake_up(waitp))
                ++vcpu->stat.generic.halt_wakeup;
@@ -1089,7 +1096,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                        break;
                }
                tvcpu->arch.prodded = 1;
-               smp_mb();
+               smp_mb(); /* This orders prodded store vs ceded load */
                if (tvcpu->arch.ceded)
                        kvmppc_fast_vcpu_kick_hv(tvcpu);
                break;
@@ -3766,6 +3773,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
                pvc = core_info.vc[sub];
                pvc->pcpu = pcpu + thr;
                for_each_runnable_thread(i, vcpu, pvc) {
+                       /*
+                        * XXX: is kvmppc_start_thread called too late here?
+                        * It updates vcpu->cpu and vcpu->arch.thread_cpu
+                        * which are used by kvmppc_fast_vcpu_kick_hv(), but
+                        * kick is called after new exceptions become available
+                        * and exceptions are checked earlier than here, by
+                        * kvmppc_core_prepare_to_enter.
+                        */
                        kvmppc_start_thread(vcpu, pvc);
                        kvmppc_create_dtl_entry(vcpu, pvc);
                        trace_kvm_guest_enter(vcpu);
@@ -4487,6 +4502,21 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
        if (need_resched() || !kvm->arch.mmu_ready)
                goto out;
 
+       vcpu->cpu = pcpu;
+       vcpu->arch.thread_cpu = pcpu;
+       vc->pcpu = pcpu;
+       local_paca->kvm_hstate.kvm_vcpu = vcpu;
+       local_paca->kvm_hstate.ptid = 0;
+       local_paca->kvm_hstate.fake_suspend = 0;
+
+       /*
+        * Orders set cpu/thread_cpu vs testing for pending interrupts and
+        * doorbells below. The other side is when these fields are set vs
+        * kvmppc_fast_vcpu_kick_hv reading the cpu/thread_cpu fields to
+        * kick a vCPU to notice the pending interrupt.
+        */
+       smp_mb();
+
        if (!nested) {
                kvmppc_core_prepare_to_enter(vcpu);
                if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
@@ -4506,13 +4536,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 
        tb = mftb();
 
-       vcpu->cpu = pcpu;
-       vcpu->arch.thread_cpu = pcpu;
-       vc->pcpu = pcpu;
-       local_paca->kvm_hstate.kvm_vcpu = vcpu;
-       local_paca->kvm_hstate.ptid = 0;
-       local_paca->kvm_hstate.fake_suspend = 0;
-
        __kvmppc_create_dtl_entry(vcpu, pcpu, tb + vc->tb_offset, 0);
 
        trace_kvm_guest_enter(vcpu);
@@ -4614,6 +4637,8 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
        run->exit_reason = KVM_EXIT_INTR;
        vcpu->arch.ret = -EINTR;
  out:
+       vcpu->cpu = -1;
+       vcpu->arch.thread_cpu = -1;
        powerpc_local_irq_pmu_restore(flags);
        preempt_enable();
        goto done;
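
Taken together, the relocated cpu-field stores, the new smp_mb(), and the rcuwait_wake_up() comment form a standard store/load barrier pairing. A compact sketch of the two sides (illustrative layout, not literal kernel code):

    vCPU entry path                         kicker path
    ---------------                         -----------
    vcpu->cpu = pcpu;                       tvcpu->arch.prodded = 1;
    vcpu->arch.thread_cpu = pcpu;           smp_mb();  /* inside rcuwait_wake_up() */
    smp_mb();                               read vcpu->cpu / thread_cpu;
    check pending interrupts/doorbells;     send IPI if the target looks running;

With a full barrier on each side, whichever path runs second is guaranteed to observe the other's store, so a wakeup can neither be missed by the entry path nor aimed at a CPU the vCPU never reached; resetting cpu/thread_cpu to -1 on the way out keeps the kick side from IPIing a CPU the vCPU has already left.
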
index 34a801c3604adcee59ea7641a8ded0d51247dd1a..7bf9e6ca5c2df60506bb2225a92c1914c71d5b26 100644 (file)
@@ -137,12 +137,15 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
        svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
        svcpu->in_use = 0;
        svcpu_put(svcpu);
-#endif
 
        /* Disable AIL if supported */
-       if (cpu_has_feature(CPU_FTR_HVMODE) &&
-           cpu_has_feature(CPU_FTR_ARCH_207S))
-               mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);
+       if (cpu_has_feature(CPU_FTR_HVMODE)) {
+               if (cpu_has_feature(CPU_FTR_ARCH_207S))
+                       mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);
+               if (cpu_has_feature(CPU_FTR_ARCH_300) && (current->thread.fscr & FSCR_SCV))
+                       mtspr(SPRN_FSCR, mfspr(SPRN_FSCR) & ~FSCR_SCV);
+       }
+#endif
 
        vcpu->cpu = smp_processor_id();
 #ifdef CONFIG_PPC_BOOK3S_32
@@ -165,6 +168,14 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
        memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
        to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
        svcpu_put(svcpu);
+
+       /* Enable AIL if supported */
+       if (cpu_has_feature(CPU_FTR_HVMODE)) {
+               if (cpu_has_feature(CPU_FTR_ARCH_207S))
+                       mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);
+               if (cpu_has_feature(CPU_FTR_ARCH_300) && (current->thread.fscr & FSCR_SCV))
+                       mtspr(SPRN_FSCR, mfspr(SPRN_FSCR) | FSCR_SCV);
+       }
 #endif
 
        if (kvmppc_is_split_real(vcpu))
@@ -174,11 +185,6 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
        kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
        kvmppc_save_tm_pr(vcpu);
 
-       /* Enable AIL if supported */
-       if (cpu_has_feature(CPU_FTR_HVMODE) &&
-           cpu_has_feature(CPU_FTR_ARCH_207S))
-               mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);
-
        vcpu->cpu = -1;
 }
 
@@ -1037,6 +1043,8 @@ static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
 
 void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
 {
+       if (fscr & FSCR_SCV)
+               fscr &= ~FSCR_SCV; /* SCV must not be enabled */
        if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
                /* TAR got dropped, drop it in shadow too */
                kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
index 1f10e7dfcdd05bd3aaff4f49a78ffcf08436c855..dc4f51ac84bc6012a068eb4c9793e344a86b9221 100644 (file)
@@ -281,6 +281,22 @@ static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
        return EMULATE_DONE;
 }
 
+static int kvmppc_h_pr_set_mode(struct kvm_vcpu *vcpu)
+{
+       unsigned long mflags = kvmppc_get_gpr(vcpu, 4);
+       unsigned long resource = kvmppc_get_gpr(vcpu, 5);
+
+       if (resource == H_SET_MODE_RESOURCE_ADDR_TRANS_MODE) {
+               /* KVM PR does not provide AIL!=0 to guests */
+               if (mflags == 0)
+                       kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+               else
+                       kvmppc_set_gpr(vcpu, 3, H_UNSUPPORTED_FLAG_START - 63);
+               return EMULATE_DONE;
+       }
+       return EMULATE_FAIL;
+}
+
 #ifdef CONFIG_SPAPR_TCE_IOMMU
 static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
 {
@@ -384,6 +400,8 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
                return kvmppc_h_pr_logical_ci_load(vcpu);
        case H_LOGICAL_CI_STORE:
                return kvmppc_h_pr_logical_ci_store(vcpu);
+       case H_SET_MODE:
+               return kvmppc_h_pr_set_mode(vcpu);
        case H_XIRR:
        case H_CPPR:
        case H_EOI:
@@ -421,6 +439,7 @@ int kvmppc_hcall_impl_pr(unsigned long cmd)
        case H_CEDE:
        case H_LOGICAL_CI_LOAD:
        case H_LOGICAL_CI_STORE:
+       case H_SET_MODE:
 #ifdef CONFIG_KVM_XICS
        case H_XIRR:
        case H_CPPR:
@@ -447,6 +466,7 @@ static unsigned int default_hcall_list[] = {
        H_BULK_REMOVE,
        H_PUT_TCE,
        H_CEDE,
+       H_SET_MODE,
 #ifdef CONFIG_KVM_XICS
        H_XIRR,
        H_CPPR,
index 9772b176e406b03565273a9fe10d73d72b85ec53..875c30c12db046957bed254f5519a279023e2e8e 100644 (file)
@@ -705,6 +705,23 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                r = 1;
                break;
 #endif
+       case KVM_CAP_PPC_AIL_MODE_3:
+               r = 0;
+               /*
+                * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
+                * The POWER9s can support it if the guest runs in hash mode,
+                * but QEMU doesn't necessarily query the capability in time.
+                */
+               if (hv_enabled) {
+                       if (kvmhv_on_pseries()) {
+                               if (pseries_reloc_on_exception())
+                                       r = 1;
+                       } else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
+                                 !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
+                               r = 1;
+                       }
+               }
+               break;
        default:
                r = 0;
                break;
index 8e301cd8925b2bde870fd8cddd933a6128b4924e..4d221d033804ef89638e1fd199e5a8fc8c397e69 100644 (file)
@@ -255,7 +255,7 @@ void __init mem_init(void)
 #endif
 
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-       set_max_mapnr(max_low_pfn);
+       set_max_mapnr(max_pfn);
 
        kasan_late_init();
 
index b9b7fefbb64b9c7298ac718da6864b8b5b4c6a32..13022d734951b99e312a8074658d66b044558c6a 100644 (file)
@@ -1436,7 +1436,7 @@ int find_and_online_cpu_nid(int cpu)
        if (new_nid < 0 || !node_possible(new_nid))
                new_nid = first_online_node;
 
-       if (NODE_DATA(new_nid) == NULL) {
+       if (!node_online(new_nid)) {
 #ifdef CONFIG_MEMORY_HOTPLUG
                /*
                 * Need to ensure that NODE_DATA is initialized for a node from
index 069d7b3bb142ef58fb9afde20b2905955d204364..955ff8aa1644d03da0a53b6ca7e2f2a465d737ba 100644 (file)
@@ -353,6 +353,14 @@ static void pseries_lpar_idle(void)
        pseries_idle_epilog();
 }
 
+static bool pseries_reloc_on_exception_enabled;
+
+bool pseries_reloc_on_exception(void)
+{
+       return pseries_reloc_on_exception_enabled;
+}
+EXPORT_SYMBOL_GPL(pseries_reloc_on_exception);
+
 /*
  * Enable relocation on during exceptions. This has partition wide scope and
  * may take a while to complete, if it takes longer than one second we will
@@ -377,6 +385,7 @@ bool pseries_enable_reloc_on_exc(void)
                                        " on exceptions: %ld\n", rc);
                                return false;
                        }
+                       pseries_reloc_on_exception_enabled = true;
                        return true;
                }
 
@@ -404,7 +413,9 @@ void pseries_disable_reloc_on_exc(void)
                        break;
                mdelay(get_longbusy_msecs(rc));
        }
-       if (rc != H_SUCCESS)
+       if (rc == H_SUCCESS)
+               pseries_reloc_on_exception_enabled = false;
+       else
                pr_warn("Warning: Failed to disable relocation on exceptions: %ld\n",
                        rc);
 }
index 4a7fcde5afc0708e6cce0ca4f31ec893cbcb1795..909535ca513a0a035f919d31a35dc99c470bd07d 100644 (file)
@@ -99,6 +99,7 @@ static struct attribute *vas_def_capab_attrs[] = {
        &nr_used_credits_attribute.attr,
        NULL,
 };
+ATTRIBUTE_GROUPS(vas_def_capab);
 
 static struct attribute *vas_qos_capab_attrs[] = {
        &nr_total_credits_attribute.attr,
@@ -106,6 +107,7 @@ static struct attribute *vas_qos_capab_attrs[] = {
        &update_total_credits_attribute.attr,
        NULL,
 };
+ATTRIBUTE_GROUPS(vas_qos_capab);
 
 static ssize_t vas_type_show(struct kobject *kobj, struct attribute *attr,
                             char *buf)
@@ -154,13 +156,13 @@ static const struct sysfs_ops vas_sysfs_ops = {
 static struct kobj_type vas_def_attr_type = {
                .release        =       vas_type_release,
                .sysfs_ops      =       &vas_sysfs_ops,
-               .default_attrs  =       vas_def_capab_attrs,
+               .default_groups =       vas_def_capab_groups,
 };
 
 static struct kobj_type vas_qos_attr_type = {
                .release        =       vas_type_release,
                .sysfs_ops      =       &vas_sysfs_ops,
-               .default_attrs  =       vas_qos_capab_attrs,
+               .default_groups =       vas_qos_capab_groups,
 };
 
 static char *vas_caps_kobj_name(struct vas_caps_entry *centry,
index ea8ec8a960bda8b5ddc7fa5e4b5a4be613823d29..00fd9c548f2631c6fc537f3e3dfdd28780c3e82a 100644 (file)
@@ -16,6 +16,7 @@ config RISCV
        select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
        select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
        select ARCH_HAS_BINFMT_FLAT
+       select ARCH_HAS_CURRENT_STACK_POINTER
        select ARCH_HAS_DEBUG_VM_PGTABLE
        select ARCH_HAS_DEBUG_VIRTUAL if MMU
        select ARCH_HAS_DEBUG_WX
@@ -47,6 +48,7 @@ config RISCV
        select CLONE_BACKWARDS
        select CLINT_TIMER if !MMU
        select COMMON_CLK
+       select CPU_PM if CPU_IDLE
        select EDAC_SUPPORT
        select GENERIC_ARCH_TOPOLOGY if SMP
        select GENERIC_ATOMIC64 if !64BIT
@@ -533,4 +535,10 @@ source "kernel/power/Kconfig"
 
 endmenu
 
+menu "CPU Power Management"
+
+source "drivers/cpuidle/Kconfig"
+
+endmenu
+
 source "arch/riscv/kvm/Kconfig"
index c112ab2a90520a0b8e6923f3c32900f0cda40020..34592d00dde8c65316b7b4f7325aa7e05fc4ff21 100644 (file)
@@ -36,6 +36,9 @@ config SOC_VIRT
        select GOLDFISH
        select RTC_DRV_GOLDFISH if RTC_CLASS
        select SIFIVE_PLIC
+       select PM_GENERIC_DOMAINS if PM
+       select PM_GENERIC_DOMAINS_OF if PM && OF
+       select RISCV_SBI_CPUIDLE if CPU_IDLE
        help
          This enables support for QEMU Virt Machine.
 
index 984872f3d3a9b9ea0c8d2b1fd1ce927d1c7ac0bf..b9e30df127fefedf8cf9203733fa09d83babbebb 100644 (file)
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <50000000>;
+               spi-tx-bus-width = <4>;
+               spi-rx-bus-width = <4>;
                m25p,fast-read;
                broken-flash-reset;
        };
index 7ba99b4da304218e8a954df13c94190da4250bec..8d23401b0bbb6bcadbc293f45fc9247cbac5586b 100644 (file)
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <50000000>;
+               spi-tx-bus-width = <4>;
+               spi-rx-bus-width = <4>;
                m25p,fast-read;
                broken-flash-reset;
        };
index be9b12c9b374acb38aff0838b824ec9d84496ff6..24fd83b43d9d546e7f673333b61892f68d78457b 100644 (file)
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <50000000>;
+               spi-tx-bus-width = <4>;
+               spi-rx-bus-width = <4>;
                m25p,fast-read;
                broken-flash-reset;
        };
index 031c0c28f81957772d1b38c2811951c11a191145..25341f38292aabf66a2a0b2b1ec70dffc5e004f8 100644 (file)
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <50000000>;
+               spi-tx-bus-width = <4>;
+               spi-rx-bus-width = <4>;
                m25p,fast-read;
                broken-flash-reset;
        };
index 7cd10ded7bf8f6bfb6e93c994bd7dab3bd8113fc..30e3017f22bc778ae25ed2bad6f0b26553de0f9a 100644 (file)
@@ -15,11 +15,14 @@ CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
+CONFIG_PROFILING=y
 CONFIG_SOC_MICROCHIP_POLARFIRE=y
 CONFIG_SOC_SIFIVE=y
 CONFIG_SOC_VIRT=y
 CONFIG_SMP=y
 CONFIG_HOTPLUG_CPU=y
+CONFIG_PM=y
+CONFIG_CPU_IDLE=y
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM=m
 CONFIG_JUMP_LABEL=y
@@ -64,8 +67,6 @@ CONFIG_INPUT_MOUSEDEV=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
-CONFIG_HVC_RISCV_SBI=y
 CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_VIRTIO=y
index 3f42ed87dde80754447247417a0388860f2a00d5..2438fa39f8ae0022d50c5945033b6d77bfe234dc 100644 (file)
@@ -21,7 +21,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_AIO is not set
 # CONFIG_IO_URING is not set
 # CONFIG_ADVISE_SYSCALLS is not set
-# CONFIG_MEMBARRIER is not set
 # CONFIG_KALLSYMS is not set
 CONFIG_EMBEDDED=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index af64b95e88cc636e8369306a7203023623066343..9a133e63ae5bf3186a2fe9a1fbd91230becb01eb 100644 (file)
@@ -13,7 +13,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_AIO is not set
 # CONFIG_IO_URING is not set
 # CONFIG_ADVISE_SYSCALLS is not set
-# CONFIG_MEMBARRIER is not set
 # CONFIG_KALLSYMS is not set
 CONFIG_EMBEDDED=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index e1c9864b62376dedd8c297e7b4a4a835953ab326..5269fbb6b4fcf650db15478805c445bd9030794f 100644 (file)
@@ -19,7 +19,6 @@ CONFIG_EXPERT=y
 # CONFIG_AIO is not set
 # CONFIG_IO_URING is not set
 # CONFIG_ADVISE_SYSCALLS is not set
-# CONFIG_MEMBARRIER is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_COMPAT_BRK is not set
index e0e5c7c09ab8fc30c6aaa9556b55abba13f2ea60..7e5efdc3829d1165eb44a9a7f58a7f4b60c135fe 100644 (file)
@@ -15,11 +15,14 @@ CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
+CONFIG_PROFILING=y
 CONFIG_SOC_SIFIVE=y
 CONFIG_SOC_VIRT=y
 CONFIG_ARCH_RV32I=y
 CONFIG_SMP=y
 CONFIG_HOTPLUG_CPU=y
+CONFIG_PM=y
+CONFIG_CPU_IDLE=y
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM=m
 CONFIG_JUMP_LABEL=y
@@ -62,8 +65,6 @@ CONFIG_INPUT_MOUSEDEV=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
-CONFIG_HVC_RISCV_SBI=y
 CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_VIRTIO=y
index 618d7c5af1a2df39e54ef3565dedfa86bd0dc5c4..8c2549b16ac06f7291dee8d2a478734b1e7a3c3b 100644 (file)
 #error "Unexpected __SIZEOF_SHORT__"
 #endif
 
+#ifdef __ASSEMBLY__
+
+/* Common assembly source macros */
+
+#ifdef CONFIG_XIP_KERNEL
+.macro XIP_FIXUP_OFFSET reg
+       REG_L t0, _xip_fixup
+       add \reg, \reg, t0
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+       la t0, __data_loc
+       REG_L t1, _xip_phys_offset
+       sub \reg, \reg, t1
+       add \reg, \reg, t0
+.endm
+_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
+_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
+#else
+.macro XIP_FIXUP_OFFSET reg
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+.endm
+#endif /* CONFIG_XIP_KERNEL */
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* _ASM_RISCV_ASM_H */
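
Roughly, these fixups rebase link-time addresses at early boot on XIP kernels: XIP_FIXUP_OFFSET adds _xip_fixup (the RAM base minus the flash address of the writable-data copy) to move a symbol from its flash link address to its RAM copy, while XIP_FIXUP_FLASH_OFFSET re-expresses an address relative to __data_loc, the in-flash data origin. Note the dependency chain in the second macro: the la into t0 supplies the __data_loc term consumed by the final add, while t1 holds only the subtracted offset.
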
diff --git a/arch/riscv/include/asm/cpuidle.h b/arch/riscv/include/asm/cpuidle.h
new file mode 100644 (file)
index 0000000..71fdc60
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Allwinner Ltd
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ */
+
+#ifndef _ASM_RISCV_CPUIDLE_H
+#define _ASM_RISCV_CPUIDLE_H
+
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+static inline void cpu_do_idle(void)
+{
+       /*
+        * Add mb() here to ensure that all
+        * IO/MEM accesses are completed prior
+        * to entering WFI.
+        */
+       mb();
+       wait_for_interrupt();
+}
+
+#endif
index 1de233d8e8de215d9fd34d3ddbb0312d3617cc0d..21774d868c65bdedaf4f34ab41f4c83456bacc4a 100644 (file)
@@ -33,6 +33,8 @@ static __always_inline struct task_struct *get_current(void)
 
 #define current get_current()
 
+register unsigned long current_stack_pointer __asm__("sp");
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_RISCV_CURRENT_H */
index 4254ff2ff04943f7df7053d6d8263edaba7a3f36..1075beae1ac64521e4c98eadf9acc785deb93d3e 100644 (file)
@@ -2,8 +2,8 @@
 /* Copyright (C) 2017 Andes Technology Corporation */
 #ifdef CONFIG_MODULE_SECTIONS
 SECTIONS {
-       .plt (NOLOAD) : { BYTE(0) }
-       .got (NOLOAD) : { BYTE(0) }
-       .got.plt (NOLOAD) : { BYTE(0) }
+       .plt : { BYTE(0) }
+       .got : { BYTE(0) }
+       .got.plt : { BYTE(0) }
 }
 #endif
diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h
new file mode 100644 (file)
index 0000000..8be391c
--- /dev/null
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#ifndef _ASM_RISCV_SUSPEND_H
+#define _ASM_RISCV_SUSPEND_H
+
+#include <asm/ptrace.h>
+
+struct suspend_context {
+       /* Saved and restored by low-level functions */
+       struct pt_regs regs;
+       /* Saved and restored by high-level functions */
+       unsigned long scratch;
+       unsigned long tvec;
+       unsigned long ie;
+#ifdef CONFIG_MMU
+       unsigned long satp;
+#endif
+};
+
+/* Low-level CPU suspend entry function */
+int __cpu_suspend_enter(struct suspend_context *context);
+
+/* High-level CPU suspend which will save context and call finish() */
+int cpu_suspend(unsigned long arg,
+               int (*finish)(unsigned long arg,
+                             unsigned long entry,
+                             unsigned long context));
+
+/* Low-level CPU resume entry function */
+int __cpu_resume_enter(unsigned long hartid, unsigned long context);
+
+#endif
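
How a caller is meant to drive this API is easiest to see with a finisher example. The sketch below mirrors the SBI HSM cpuidle glue this interface was written for; the SBI constants and helpers (sbi_ecall(), SBI_EXT_HSM_HART_SUSPEND, sbi_err_map_linux_errno()) come from <asm/sbi.h>, not from this header, so read it as an assumption-laden illustration rather than part of the diff:

    #include <asm/sbi.h>
    #include <asm/suspend.h>

    /* Finisher: ask firmware for a non-retentive hart suspend. By the time
     * this runs, __cpu_suspend_enter() has already stashed the context that
     * __cpu_resume_enter() will restore once the hart restarts at
     * resume_addr. */
    static int sbi_suspend_finisher(unsigned long suspend_type,
                                    unsigned long resume_addr,
                                    unsigned long opaque)
    {
            struct sbiret ret;

            ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
                            suspend_type, resume_addr, opaque, 0, 0, 0);

            return ret.error ? sbi_err_map_linux_errno(ret.error) : 0;
    }

    static int enter_nonretentive_idle(unsigned long suspend_type)
    {
            /* Returns 0 after coming back through __cpu_resume_enter() */
            return cpu_suspend(suspend_type, sbi_suspend_finisher);
    }
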
index 60da0dcacf145c7c7eb5380e6b30869d11f5169b..74d888c8d631a217c6bf4dfe89d06369b09c016a 100644 (file)
 #include <asm/page.h>
 #include <linux/const.h>
 
+#ifdef CONFIG_KASAN
+#define KASAN_STACK_ORDER 1
+#else
+#define KASAN_STACK_ORDER 0
+#endif
+
 /* thread information allocation */
 #ifdef CONFIG_64BIT
-#define THREAD_SIZE_ORDER      (2)
+#define THREAD_SIZE_ORDER      (2 + KASAN_STACK_ORDER)
 #else
-#define THREAD_SIZE_ORDER      (1)
+#define THREAD_SIZE_ORDER      (1 + KASAN_STACK_ORDER)
 #endif
 #define THREAD_SIZE            (PAGE_SIZE << THREAD_SIZE_ORDER)
 
index e0133d113216f2cc0a01d8e95e3cf422a03ac45f..87adbe47bc158075030cfacaa9bcbbb68be1121b 100644 (file)
@@ -48,6 +48,8 @@ obj-$(CONFIG_RISCV_BOOT_SPINWAIT) += cpu_ops_spinwait.o
 obj-$(CONFIG_MODULES)          += module.o
 obj-$(CONFIG_MODULE_SECTIONS)  += module-sections.o
 
+obj-$(CONFIG_CPU_PM)           += suspend_entry.o suspend.o
+
 obj-$(CONFIG_FUNCTION_TRACER)  += mcount.o ftrace.o
 obj-$(CONFIG_DYNAMIC_FTRACE)   += mcount-dyn.o
 
index df0519a64eaf85a44a3e327fbf813d797948c36d..df9444397908d37e1a0478a39ee4f5c3c1d9abcf 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/thread_info.h>
 #include <asm/ptrace.h>
 #include <asm/cpu_ops_sbi.h>
+#include <asm/suspend.h>
 
 void asm_offsets(void);
 
@@ -113,6 +114,8 @@ void asm_offsets(void)
        OFFSET(PT_BADADDR, pt_regs, badaddr);
        OFFSET(PT_CAUSE, pt_regs, cause);
 
+       OFFSET(SUSPEND_CONTEXT_REGS, suspend_context, regs);
+
        OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero);
        OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);
        OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);
index d2a936195295b9ffef267495337c7f138e491ba5..ccb617791e561a40f8a05da88bdf61d126a62d20 100644 (file)
@@ -69,11 +69,11 @@ int riscv_of_parent_hartid(struct device_node *node)
                .uprop = #UPROP,                                \
                .isa_ext_id = EXTID,                            \
        }
-/**
+/*
  * Here are the ordering rules of extension naming defined by RISC-V
  * specification :
  * 1. All extensions should be separated from other multi-letter extensions
- *    from other multi-letter extensions by an underscore.
+ *    by an underscore.
  * 2. The first letter following the 'Z' conventionally indicates the most
  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
  *    If multiple 'Z' extensions are named, they should be ordered first
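
As a concrete instance, `rv64imac_zicsr_zifencei` satisfies both rules: each multi-letter extension is introduced by an underscore, and the 'Zi' extensions follow their closest single-letter category ('I') and sort alphabetically among themselves.
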
@@ -110,7 +110,7 @@ static void print_isa_ext(struct seq_file *f)
        }
 }
 
-/**
+/*
  * These are the only valid base (single letter) ISA extensions as per the spec.
  * It also specifies the canonical order in which it appears in the spec.
  * Some of the extension may just be a place holder for now (B, K, P, J).
index 2e16f6732cdf8fba7232942f8238bc82ed26a618..4f5a6f84e2a4f30e1ba373b6a4e30f255954fa8d 100644 (file)
@@ -21,7 +21,7 @@ const struct cpu_operations cpu_ops_sbi;
  * be invoked from multiple threads in parallel. Define a per cpu data
  * to handle that.
  */
-DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
+static DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
 
 static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
                              unsigned long priv)
index ec07f991866a55d9ccad2643f045eed78ecc8f89..893b8bb693912e1c92cb2bd908a26a98a14db40b 100644 (file)
 #include <asm/image.h>
 #include "efi-header.S"
 
-#ifdef CONFIG_XIP_KERNEL
-.macro XIP_FIXUP_OFFSET reg
-       REG_L t0, _xip_fixup
-       add \reg, \reg, t0
-.endm
-.macro XIP_FIXUP_FLASH_OFFSET reg
-       la t0, __data_loc
-       REG_L t1, _xip_phys_offset
-       sub \reg, \reg, t1
-       add \reg, \reg, t0
-.endm
-_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
-_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
-#else
-.macro XIP_FIXUP_OFFSET reg
-.endm
-.macro XIP_FIXUP_FLASH_OFFSET reg
-.endm
-#endif /* CONFIG_XIP_KERNEL */
-
 __HEAD
 ENTRY(_start)
        /*
@@ -89,7 +69,8 @@ pe_head_start:
 
 .align 2
 #ifdef CONFIG_MMU
-relocate:
+       .global relocate_enable_mmu
+relocate_enable_mmu:
        /* Relocate return address */
        la a1, kernel_map
        XIP_FIXUP_OFFSET a1
@@ -184,7 +165,7 @@ secondary_start_sbi:
        /* Enable virtual memory and relocate to virtual address */
        la a0, swapper_pg_dir
        XIP_FIXUP_OFFSET a0
-       call relocate
+       call relocate_enable_mmu
 #endif
        call setup_trap_vector
        tail smp_callin
@@ -328,7 +309,7 @@ clear_bss_done:
 #ifdef CONFIG_MMU
        la a0, early_pg_dir
        XIP_FIXUP_OFFSET a0
-       call relocate
+       call relocate_enable_mmu
 #endif /* CONFIG_MMU */
 
        call setup_trap_vector
index 4a48287513c375b5f5c5016bd8d09666b3dbcec7..c29cef90d1ddf64a8b8692961a478acb7150dda0 100644 (file)
@@ -69,7 +69,7 @@ static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
        return 0;
 }
 
-static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
+static int apply_r_riscv_rvc_branch_rela(struct module *me, u32 *location,
                                         Elf_Addr v)
 {
        ptrdiff_t offset = (void *)v - (void *)location;
@@ -301,7 +301,7 @@ static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
        [R_RISCV_64]                    = apply_r_riscv_64_rela,
        [R_RISCV_BRANCH]                = apply_r_riscv_branch_rela,
        [R_RISCV_JAL]                   = apply_r_riscv_jal_rela,
-       [R_RISCV_RVC_BRANCH]            = apply_r_riscv_rcv_branch_rela,
+       [R_RISCV_RVC_BRANCH]            = apply_r_riscv_rvc_branch_rela,
        [R_RISCV_RVC_JUMP]              = apply_r_riscv_rvc_jump_rela,
        [R_RISCV_PCREL_HI20]            = apply_r_riscv_pcrel_hi20_rela,
        [R_RISCV_PCREL_LO12_I]          = apply_r_riscv_pcrel_lo12_i_rela,
index 55faa4991b876fb86c71f072efb2163f1109155d..3348a61de7d998c6a87ce8e7113d67fefc1c955a 100644 (file)
@@ -68,7 +68,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 
 static bool fill_callchain(void *entry, unsigned long pc)
 {
-       return perf_callchain_store(entry, pc);
+       return perf_callchain_store(entry, pc) == 0;
 }
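
The `== 0` matters because perf_callchain_store() returns 0 on success and non-zero once the entry buffer is full, while the unwinder's callback contract is "return true to keep walking"; passing the raw return value through therefore aborted the walk after the first successfully stored frame.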
 
 void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
index 03ac3aa611f59c0b3e6dffd454d643fc3748d793..504b496787aa4d384e5bae04dc35bf5f104123d7 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/string.h>
 #include <asm/switch_to.h>
 #include <asm/thread_info.h>
+#include <asm/cpuidle.h>
 
 register unsigned long gp_in_global __asm__("gp");
 
@@ -37,7 +38,7 @@ extern asmlinkage void ret_from_kernel_thread(void);
 
 void arch_cpu_idle(void)
 {
-       wait_for_interrupt();
+       cpu_do_idle();
        raw_local_irq_enable();
 }
 
index 14d2b53ec322d86005d2b94ffa92c21e179d29df..08d11a53f39e7a767a643b27b996b2ce36a3fb16 100644 (file)
@@ -14,8 +14,6 @@
 
 #include <asm/stacktrace.h>
 
-register unsigned long sp_in_global __asm__("sp");
-
 #ifdef CONFIG_FRAME_POINTER
 
 void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
@@ -30,7 +28,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                pc = instruction_pointer(regs);
        } else if (task == NULL || task == current) {
                fp = (unsigned long)__builtin_frame_address(0);
-               sp = sp_in_global;
+               sp = current_stack_pointer;
                pc = (unsigned long)walk_stackframe;
        } else {
                /* task blocked in __switch_to */
@@ -78,7 +76,7 @@ void notrace walk_stackframe(struct task_struct *task,
                sp = user_stack_pointer(regs);
                pc = instruction_pointer(regs);
        } else if (task == NULL || task == current) {
-               sp = sp_in_global;
+               sp = current_stack_pointer;
                pc = (unsigned long)walk_stackframe;
        } else {
                /* task blocked in __switch_to */
diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c
new file mode 100644 (file)
index 0000000..9ba24fb
--- /dev/null
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/ftrace.h>
+#include <asm/csr.h>
+#include <asm/suspend.h>
+
+static void suspend_save_csrs(struct suspend_context *context)
+{
+       context->scratch = csr_read(CSR_SCRATCH);
+       context->tvec = csr_read(CSR_TVEC);
+       context->ie = csr_read(CSR_IE);
+
+       /*
+        * No need to save/restore IP CSR (i.e. MIP or SIP) because:
+        *
+        * 1. For no-MMU (M-mode) kernel, the bits in MIP are set by
+        *    external devices (such as interrupt controller, timer, etc).
+        * 2. For MMU (S-mode) kernel, the bits in SIP are set by
+        *    M-mode firmware and external devices (such as interrupt
+        *    controller, etc).
+        */
+
+#ifdef CONFIG_MMU
+       context->satp = csr_read(CSR_SATP);
+#endif
+}
+
+static void suspend_restore_csrs(struct suspend_context *context)
+{
+       csr_write(CSR_SCRATCH, context->scratch);
+       csr_write(CSR_TVEC, context->tvec);
+       csr_write(CSR_IE, context->ie);
+
+#ifdef CONFIG_MMU
+       csr_write(CSR_SATP, context->satp);
+#endif
+}
+
+int cpu_suspend(unsigned long arg,
+               int (*finish)(unsigned long arg,
+                             unsigned long entry,
+                             unsigned long context))
+{
+       int rc = 0;
+       struct suspend_context context = { 0 };
+
+       /* Finisher should be non-NULL */
+       if (!finish)
+               return -EINVAL;
+
+       /* Save additional CSRs */
+       suspend_save_csrs(&context);
+
+       /*
+        * Function graph tracer state gets inconsistent when the kernel
+        * calls functions that never return (aka finishers) hence disable
+        * graph tracing during their execution.
+        */
+       pause_graph_tracing();
+
+       /* Save context on stack */
+       if (__cpu_suspend_enter(&context)) {
+               /* Call the finisher */
+               rc = finish(arg, __pa_symbol(__cpu_resume_enter),
+                           (ulong)&context);
+
+               /*
+                * Should never reach here, unless the suspend finisher
+                * fails. Successful cpu_suspend() should return from
+                * __cpu_resume_enter()
+                */
+               if (!rc)
+                       rc = -EOPNOTSUPP;
+       }
+
+       /* Enable function graph tracer */
+       unpause_graph_tracing();
+
+       /* Restore additional CSRs */
+       suspend_restore_csrs(&context);
+
+       return rc;
+}
diff --git a/arch/riscv/kernel/suspend_entry.S b/arch/riscv/kernel/suspend_entry.S
new file mode 100644 (file)
index 0000000..4b07b80
--- /dev/null
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/csr.h>
+
+       .text
+       .altmacro
+       .option norelax
+
+ENTRY(__cpu_suspend_enter)
+       /* Save registers (except A0 and T0-T6) */
+       REG_S   ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
+       REG_S   sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
+       REG_S   gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
+       REG_S   tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
+       REG_S   s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
+       REG_S   s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
+       REG_S   a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
+       REG_S   a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
+       REG_S   a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
+       REG_S   a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
+       REG_S   a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
+       REG_S   a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
+       REG_S   a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
+       REG_S   s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
+       REG_S   s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
+       REG_S   s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
+       REG_S   s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
+       REG_S   s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
+       REG_S   s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
+       REG_S   s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
+       REG_S   s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
+       REG_S   s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
+       REG_S   s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
+
+       /* Save CSRs */
+       csrr    t0, CSR_EPC
+       REG_S   t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
+       csrr    t0, CSR_STATUS
+       REG_S   t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
+       csrr    t0, CSR_TVAL
+       REG_S   t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
+       csrr    t0, CSR_CAUSE
+       REG_S   t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
+
+       /* Return non-zero value */
+       li      a0, 1
+
+       /* Return to C code */
+       ret
+END(__cpu_suspend_enter)
+
+ENTRY(__cpu_resume_enter)
+       /* Load the global pointer */
+       .option push
+       .option norelax
+               la gp, __global_pointer$
+       .option pop
+
+#ifdef CONFIG_MMU
+       /* Save A0 and A1 */
+       add     t0, a0, zero
+       add     t1, a1, zero
+
+       /* Enable MMU */
+       la      a0, swapper_pg_dir
+       XIP_FIXUP_OFFSET a0
+       call    relocate_enable_mmu
+
+       /* Restore A0 and A1 */
+       add     a0, t0, zero
+       add     a1, t1, zero
+#endif
+
+       /* Make A0 point to suspend context */
+       add     a0, a1, zero
+
+       /* Restore CSRs */
+       REG_L   t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
+       csrw    CSR_EPC, t0
+       REG_L   t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
+       csrw    CSR_STATUS, t0
+       REG_L   t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
+       csrw    CSR_TVAL, t0
+       REG_L   t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
+       csrw    CSR_CAUSE, t0
+
+       /* Restore registers (except A0 and T0-T6) */
+       REG_L   ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
+       REG_L   sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
+       REG_L   gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
+       REG_L   tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
+       REG_L   s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
+       REG_L   s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
+       REG_L   a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
+       REG_L   a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
+       REG_L   a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
+       REG_L   a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
+       REG_L   a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
+       REG_L   a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
+       REG_L   a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
+       REG_L   s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
+       REG_L   s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
+       REG_L   s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
+       REG_L   s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
+       REG_L   s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
+       REG_L   s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
+       REG_L   s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
+       REG_L   s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
+       REG_L   s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
+       REG_L   s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
+
+       /* Return zero value */
+       add     a0, zero, zero
+
+       /* Return to C code */
+       ret
+END(__cpu_resume_enter)
index 624166004e36c637fb5fbd8966f3412f72a84294..6785aef4cbd46e4d8bfd90c65fe2ea43c2cc1a5f 100644 (file)
@@ -653,8 +653,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
                                     vcpu->arch.isa);
        kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
 
-       csr_write(CSR_HGATP, 0);
-
        csr->vsstatus = csr_read(CSR_VSSTATUS);
        csr->vsie = csr_read(CSR_VSIE);
        csr->vstvec = csr_read(CSR_VSTVEC);
index 4449a976e5a6bab8fb691b81db972fc82ee5b12a..d4308c5120078223b88f9b401ee849e8bd2020d2 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/err.h>
 #include <linux/kvm_host.h>
 #include <linux/uaccess.h>
+#include <asm/hwcap.h>
 
 #ifdef CONFIG_FPU
 void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
index 9b80e8bed3f6e17e9e22b4264b0f2c1567a02649..e084c72104f86f55dad91577800b8952366494c2 100644 (file)
@@ -58,6 +58,7 @@ config S390
        select ALTERNATE_USER_ADDRESS_SPACE
        select ARCH_32BIT_USTAT_F_TINODE
        select ARCH_BINFMT_ELF_STATE
+       select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
        select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM
        select ARCH_ENABLE_MEMORY_HOTREMOVE
        select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
@@ -254,6 +255,10 @@ config HAVE_MARCH_Z15_FEATURES
        def_bool n
        select HAVE_MARCH_Z14_FEATURES
 
+config HAVE_MARCH_Z16_FEATURES
+       def_bool n
+       select HAVE_MARCH_Z15_FEATURES
+
 choice
        prompt "Processor type"
        default MARCH_Z196
@@ -311,6 +316,14 @@ config MARCH_Z15
          and 8561 series). The kernel will be slightly faster but will not
          work on older machines.
 
+config MARCH_Z16
+       bool "IBM z16"
+       select HAVE_MARCH_Z16_FEATURES
+       depends on $(cc-option,-march=z16)
+       help
+         Select this to enable optimizations for IBM z16 (3931 and
+         3932 series).
+
 endchoice
 
 config MARCH_Z10_TUNE
@@ -331,6 +344,9 @@ config MARCH_Z14_TUNE
 config MARCH_Z15_TUNE
        def_bool TUNE_Z15 || MARCH_Z15 && TUNE_DEFAULT
 
+config MARCH_Z16_TUNE
+       def_bool TUNE_Z16 || MARCH_Z16 && TUNE_DEFAULT
+
 choice
        prompt "Tune code generation"
        default TUNE_DEFAULT
@@ -371,6 +387,10 @@ config TUNE_Z15
        bool "IBM z15"
        depends on $(cc-option,-mtune=z15)
 
+config TUNE_Z16
+       bool "IBM z16"
+       depends on $(cc-option,-mtune=z16)
+
 endchoice
 
 config 64BIT
index 7a65bca1e5afcf6c9065860560dabeaa812e6c9e..e441b60b1812c71d655d58233882dc473d31862d 100644 (file)
@@ -42,6 +42,7 @@ mflags-$(CONFIG_MARCH_ZEC12)  := -march=zEC12
 mflags-$(CONFIG_MARCH_Z13)    := -march=z13
 mflags-$(CONFIG_MARCH_Z14)    := -march=z14
 mflags-$(CONFIG_MARCH_Z15)    := -march=z15
+mflags-$(CONFIG_MARCH_Z16)    := -march=z16
 
 export CC_FLAGS_MARCH := $(mflags-y)
 
@@ -54,6 +55,7 @@ cflags-$(CONFIG_MARCH_ZEC12_TUNE)     += -mtune=zEC12
 cflags-$(CONFIG_MARCH_Z13_TUNE)                += -mtune=z13
 cflags-$(CONFIG_MARCH_Z14_TUNE)                += -mtune=z14
 cflags-$(CONFIG_MARCH_Z15_TUNE)                += -mtune=z15
+cflags-$(CONFIG_MARCH_Z16_TUNE)                += -mtune=z16
 
 cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
 
index 498bed9b261b876a52122ec415515275b58cdaf5..f6dfde577ce83049db124250e3ebbb3af5c56924 100644 (file)
@@ -499,11 +499,13 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_CISCO is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DAVICOM is not set
 # CONFIG_NET_VENDOR_DEC is not set
 # CONFIG_NET_VENDOR_DLINK is not set
 # CONFIG_NET_VENDOR_EMULEX is not set
 # CONFIG_NET_VENDOR_ENGLEDER is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_FUNGIBLE is not set
 # CONFIG_NET_VENDOR_GOOGLE is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
@@ -588,13 +590,13 @@ CONFIG_MLX5_INFINIBAND=m
 CONFIG_SYNC_FILE=y
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
+CONFIG_MLX5_VFIO_PCI=m
 CONFIG_VFIO_MDEV=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
 CONFIG_VHOST_NET=m
 CONFIG_VHOST_VSOCK=m
-# CONFIG_SURFACE_PLATFORMS is not set
 CONFIG_S390_CCW_IOMMU=y
 CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
@@ -690,6 +692,7 @@ CONFIG_ENCRYPTED_KEYS=m
 CONFIG_KEY_NOTIFICATIONS=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
@@ -733,6 +736,7 @@ CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -786,7 +790,6 @@ CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_DEBUG_INFO_BTF=y
 CONFIG_GDB_SCRIPTS=y
@@ -814,6 +817,7 @@ CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
 CONFIG_DEBUG_PER_CPU_MAPS=y
 CONFIG_KFENCE=y
+CONFIG_KFENCE_DEFERRABLE=y
 CONFIG_KFENCE_STATIC_KEYS=y
 CONFIG_DEBUG_SHIRQ=y
 CONFIG_PANIC_ON_OOPS=y
index 61e36b999f67e631038137d4c648510f84ef3ff3..706df3a4a867f0619aeae6e8be6efefffd8ea26e 100644 (file)
@@ -490,11 +490,13 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_CISCO is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DAVICOM is not set
 # CONFIG_NET_VENDOR_DEC is not set
 # CONFIG_NET_VENDOR_DLINK is not set
 # CONFIG_NET_VENDOR_EMULEX is not set
 # CONFIG_NET_VENDOR_ENGLEDER is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_FUNGIBLE is not set
 # CONFIG_NET_VENDOR_GOOGLE is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
@@ -578,13 +580,13 @@ CONFIG_MLX5_INFINIBAND=m
 CONFIG_SYNC_FILE=y
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
+CONFIG_MLX5_VFIO_PCI=m
 CONFIG_VFIO_MDEV=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
 CONFIG_VHOST_NET=m
 CONFIG_VHOST_VSOCK=m
-# CONFIG_SURFACE_PLATFORMS is not set
 CONFIG_S390_CCW_IOMMU=y
 CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
@@ -720,6 +722,7 @@ CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -772,7 +775,6 @@ CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_DEBUG_INFO_BTF=y
 CONFIG_GDB_SCRIPTS=y
index c55c668dc3c788ba11c6e83803c62ee50a4c75f5..a87fcc45e3071d93b5c869c89b9887236cbb2e27 100644 (file)
@@ -26,6 +26,7 @@ CONFIG_CRASH_DUMP=y
 # CONFIG_S390_GUEST is not set
 # CONFIG_SECCOMP is not set
 # CONFIG_GCC_PLUGINS is not set
+# CONFIG_BLOCK_LEGACY_AUTOLOAD is not set
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 # CONFIG_COMPACTION is not set
@@ -60,7 +61,6 @@ CONFIG_ZFCP=y
 # CONFIG_HID is not set
 # CONFIG_VIRTIO_MENU is not set
 # CONFIG_VHOST_MENU is not set
-# CONFIG_SURFACE_PLATFORMS is not set
 # CONFIG_IOMMU_SUPPORT is not set
 # CONFIG_DNOTIFY is not set
 # CONFIG_INOTIFY_USER is not set
@@ -71,10 +71,10 @@ CONFIG_LSM="yama,loadpin,safesetid,integrity"
 CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_PRINTK_TIME=y
 # CONFIG_SYMBOLIC_ERRNAME is not set
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_DEBUG_INFO_BTF=y
 CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
 CONFIG_PANIC_ON_OOPS=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
index 955d620db23edf04d1c8c946c22a4ccc219e9f74..bb3837d7387ce037cff3d8026b253ee462fb8fb7 100644 (file)
  * a 2-byte nop if the size of the area is not divisible by 6.
  */
 .macro alt_pad_fill bytes
-       .fill   ( \bytes ) / 6, 6, 0xc0040000
-       .fill   ( \bytes ) % 6 / 4, 4, 0x47000000
-       .fill   ( \bytes ) % 6 % 4 / 2, 2, 0x0700
+       .rept   ( \bytes ) / 6
+       brcl    0,0
+       .endr
+       .rept   ( \bytes ) % 6 / 4
+       nop
+       .endr
+       .rept   ( \bytes ) % 6 % 4 / 2
+       nopr
+       .endr
 .endm
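
Worked through for a 14-byte area: 14 / 6 = 2 six-byte `brcl 0,0` instructions, (14 % 6) / 4 = 0 four-byte `nop`s, and (14 % 6) % 4 / 2 = 1 two-byte `nopr`, i.e. 12 + 0 + 2 = 14 bytes. The switch from `.fill` byte patterns to `.rept` over real mnemonics emits identical padding while letting the assembler, rather than hard-coded opcodes, produce the instructions.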
 
 /*
index d3880ca764ee2259a8a3563c51f2a7bc3db5bdcf..3f2856ed680831f2f574425dc9ed19672b75f7eb 100644 (file)
@@ -71,11 +71,18 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
        ".if " oldinstr_pad_len(num) " > 6\n"                           \
        "\tjg " e_oldinstr_pad_end "f\n"                                \
        "6620:\n"                                                       \
-       "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
+       "\t.rept (" oldinstr_pad_len(num) " - (6620b-662b)) / 2\n"      \
+       "\tnopr\n"                                                      \
        ".else\n"                                                       \
-       "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n"        \
-       "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n"   \
-       "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n"  \
+       "\t.rept " oldinstr_pad_len(num) " / 6\n"                       \
+       "\t.brcl 0,0\n"                                                 \
+       "\t.endr\n"                                                     \
+       "\t.rept " oldinstr_pad_len(num) " %% 6 / 4\n"                  \
+       "\tnop\n"                                                       \
+       "\t.endr\n"                                                     \
+       "\t.rept " oldinstr_pad_len(num) " %% 6 %% 4 / 2\n"             \
+       "\tnopr\n"                                                      \
+       ".endr\n"                                                       \
        ".endif\n"
 
 #define OLDINSTR(oldinstr, num)                                                \
index ae75da592ccb444e5d3471df65f52502f0eb7b2e..b515cfa62bd9b045807fb2b7c087f1c79c718801 100644 (file)
@@ -60,11 +60,11 @@ static inline bool ap_instructions_available(void)
        unsigned long reg1 = 0;
 
        asm volatile(
-               "       lgr     0,%[reg0]\n"   /* qid into gr0 */
-               "       lghi    1,0\n"         /* 0 into gr1 */
-               "       lghi    2,0\n"         /* 0 into gr2 */
-               "       .long   0xb2af0000\n"  /* PQAP(TAPQ) */
-               "0:     la      %[reg1],1\n"   /* 1 into reg1 */
+               "       lgr     0,%[reg0]\n"            /* qid into gr0 */
+               "       lghi    1,0\n"                  /* 0 into gr1 */
+               "       lghi    2,0\n"                  /* 0 into gr2 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(TAPQ) */
+               "0:     la      %[reg1],1\n"            /* 1 into reg1 */
                "1:\n"
                EX_TABLE(0b, 1b)
                : [reg1] "+&d" (reg1)
@@ -86,11 +86,11 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
        unsigned long reg2;
 
        asm volatile(
-               "       lgr     0,%[qid]\n"    /* qid into gr0 */
-               "       lghi    2,0\n"         /* 0 into gr2 */
-               "       .long   0xb2af0000\n"  /* PQAP(TAPQ) */
-               "       lgr     %[reg1],1\n"   /* gr1 (status) into reg1 */
-               "       lgr     %[reg2],2\n"   /* gr2 into reg2 */
+               "       lgr     0,%[qid]\n"             /* qid into gr0 */
+               "       lghi    2,0\n"                  /* 0 into gr2 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(TAPQ) */
+               "       lgr     %[reg1],1\n"            /* gr1 (status) into reg1 */
+               "       lgr     %[reg2],2\n"            /* gr2 into reg2 */
                : [reg1] "=&d" (reg1), [reg2] "=&d" (reg2)
                : [qid] "d" (qid)
                : "cc", "0", "1", "2");
@@ -128,9 +128,9 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
        struct ap_queue_status reg1;
 
        asm volatile(
-               "       lgr     0,%[reg0]\n"  /* qid arg into gr0 */
-               "       .long   0xb2af0000\n" /* PQAP(RAPQ) */
-               "       lgr     %[reg1],1\n"  /* gr1 (status) into reg1 */
+               "       lgr     0,%[reg0]\n"            /* qid arg into gr0 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(RAPQ) */
+               "       lgr     %[reg1],1\n"            /* gr1 (status) into reg1 */
                : [reg1] "=&d" (reg1)
                : [reg0] "d" (reg0)
                : "cc", "0", "1");
@@ -149,9 +149,9 @@ static inline struct ap_queue_status ap_zapq(ap_qid_t qid)
        struct ap_queue_status reg1;
 
        asm volatile(
-               "       lgr     0,%[reg0]\n"   /* qid arg into gr0 */
-               "       .long   0xb2af0000\n"  /* PQAP(ZAPQ) */
-               "       lgr     %[reg1],1\n"   /* gr1 (status) into reg1 */
+               "       lgr     0,%[reg0]\n"            /* qid arg into gr0 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(ZAPQ) */
+               "       lgr     %[reg1],1\n"            /* gr1 (status) into reg1 */
                : [reg1] "=&d" (reg1)
                : [reg0] "d" (reg0)
                : "cc", "0", "1");
@@ -190,10 +190,10 @@ static inline int ap_qci(struct ap_config_info *config)
        struct ap_config_info *reg2 = config;
 
        asm volatile(
-               "       lgr     0,%[reg0]\n"   /* QCI fc into gr0 */
-               "       lgr     2,%[reg2]\n"   /* ptr to config into gr2 */
-               "       .long   0xb2af0000\n"  /* PQAP(QCI) */
-               "0:     la      %[reg1],0\n"   /* good case, QCI fc available */
+               "       lgr     0,%[reg0]\n"            /* QCI fc into gr0 */
+               "       lgr     2,%[reg2]\n"            /* ptr to config into gr2 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(QCI) */
+               "0:     la      %[reg1],0\n"            /* good case, QCI fc available */
                "1:\n"
                EX_TABLE(0b, 1b)
                : [reg1] "+&d" (reg1)
@@ -246,11 +246,11 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
        reg1.qirqctrl = qirqctrl;
 
        asm volatile(
-               "       lgr     0,%[reg0]\n"   /* qid param into gr0 */
-               "       lgr     1,%[reg1]\n"   /* irq ctrl into gr1 */
-               "       lgr     2,%[reg2]\n"   /* ni addr into gr2 */
-               "       .long   0xb2af0000\n"  /* PQAP(AQIC) */
-               "       lgr     %[reg1],1\n"   /* gr1 (status) into reg1 */
+               "       lgr     0,%[reg0]\n"            /* qid param into gr0 */
+               "       lgr     1,%[reg1]\n"            /* irq ctrl into gr1 */
+               "       lgr     2,%[reg2]\n"            /* ni addr into gr2 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(AQIC) */
+               "       lgr     %[reg1],1\n"            /* gr1 (status) into reg1 */
                : [reg1] "+&d" (reg1)
                : [reg0] "d" (reg0), [reg2] "d" (reg2)
                : "cc", "0", "1", "2");
@@ -297,11 +297,11 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
        reg1.value = apinfo->val;
 
        asm volatile(
-               "       lgr     0,%[reg0]\n"   /* qid param into gr0 */
-               "       lgr     1,%[reg1]\n"   /* qact in info into gr1 */
-               "       .long   0xb2af0000\n"  /* PQAP(QACT) */
-               "       lgr     %[reg1],1\n"   /* gr1 (status) into reg1 */
-               "       lgr     %[reg2],2\n"   /* qact out info into reg2 */
+               "       lgr     0,%[reg0]\n"            /* qid param into gr0 */
+               "       lgr     1,%[reg1]\n"            /* qact in info into gr1 */
+               "       .insn   rre,0xb2af0000,0,0\n"   /* PQAP(QACT) */
+               "       lgr     %[reg1],1\n"            /* gr1 (status) into reg1 */
+               "       lgr     %[reg2],2\n"            /* qact out info into reg2 */
                : [reg1] "+&d" (reg1), [reg2] "=&d" (reg2)
                : [reg0] "d" (reg0)
                : "cc", "0", "1", "2");
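
For reference: ".long 0xb2af0000" and ".insn rre,0xb2af0000,0,0" assemble to
the same four bytes; RRE is a 16-bit opcode, 8 unused bits and two 4-bit
register fields, and the .insn form lets the assembler check the operands and
disassembles cleanly. A stand-alone sketch of the pattern, mirroring the
ap_rapq() hunk above (lowcore and exception-table plumbing omitted):

	/* Sketch: issue PQAP(RAPQ) for a queue id and return gr1 (status). */
	static inline unsigned long pqap_rapq(unsigned long reg0)
	{
		unsigned long reg1;

		asm volatile(
			"	lgr	0,%[reg0]\n"		/* qid arg into gr0 */
			"	.insn	rre,0xb2af0000,0,0\n"	/* PQAP(RAPQ) */
			"	lgr	%[reg1],1\n"		/* gr1 (status) into reg1 */
			: [reg1] "=&d" (reg1)
			: [reg0] "d" (reg0)
			: "cc", "0", "1");
		return reg1;
	}
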
index c800199a376b01fe9dbf8a5789ad3a80e4867da7..82388da3f95ff0257e929ed51e81314c582229c9 100644 (file)
@@ -74,8 +74,17 @@ static __always_inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
        __ctl_load(reg, cr, cr);
 }
 
-void smp_ctl_set_bit(int cr, int bit);
-void smp_ctl_clear_bit(int cr, int bit);
+void smp_ctl_set_clear_bit(int cr, int bit, bool set);
+
+static inline void ctl_set_bit(int cr, int bit)
+{
+       smp_ctl_set_clear_bit(cr, bit, true);
+}
+
+static inline void ctl_clear_bit(int cr, int bit)
+{
+       smp_ctl_set_clear_bit(cr, bit, false);
+}
 
 union ctlreg0 {
        unsigned long val;
@@ -130,8 +139,5 @@ union ctlreg15 {
        };
 };
 
-#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
-
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_CTL_REG_H */
index eabab24b71dd76b1b7680aae3eaf7f1bd16a58f3..2f0a1cacdf858d42f94db5b7f652840af6761645 100644 (file)
@@ -58,7 +58,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
 
 static inline bool on_thread_stack(void)
 {
-       return !(((unsigned long)(current->stack) ^ current_stack_pointer()) & ~(THREAD_SIZE - 1));
+       return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
 }
 
 #endif
index 84ec631453254c0e510cd257157c142930162840..ff1e25d515a855f666278ca3e444ccecb9b1da80 100644 (file)
@@ -200,13 +200,7 @@ unsigned long __get_wchan(struct task_struct *p);
 /* Has task runtime instrumentation enabled ? */
 #define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
 
-static __always_inline unsigned long current_stack_pointer(void)
-{
-       unsigned long sp;
-
-       asm volatile("la %0,0(15)" : "=a" (sp));
-       return sp;
-}
+register unsigned long current_stack_pointer asm("r15");
 
 static __always_inline unsigned short stap(void)
 {
@@ -319,11 +313,18 @@ extern void (*s390_base_pgm_handler_fn)(struct pt_regs *regs);
 extern int memcpy_real(void *, unsigned long, size_t);
 extern void memcpy_absolute(void *, void *, size_t);
 
-#define mem_assign_absolute(dest, val) do {                    \
-       __typeof__(dest) __tmp = (val);                         \
-                                                               \
-       BUILD_BUG_ON(sizeof(__tmp) != sizeof(val));             \
-       memcpy_absolute(&(dest), &__tmp, sizeof(__tmp));        \
+#define put_abs_lowcore(member, x) do {                                        \
+       unsigned long __abs_address = offsetof(struct lowcore, member); \
+       __typeof__(((struct lowcore *)0)->member) __tmp = (x);          \
+                                                                       \
+       memcpy_absolute(__va(__abs_address), &__tmp, sizeof(__tmp));    \
+} while (0)
+
+#define get_abs_lowcore(x, member) do {                                        \
+       unsigned long __abs_address = offsetof(struct lowcore, member); \
+       __typeof__(((struct lowcore *)0)->member) *__ptr = &(x);        \
+                                                                       \
+       memcpy_absolute(__ptr, __va(__abs_address), sizeof(*__ptr));    \
 } while (0)
 
 extern int s390_isolate_bp(void);
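
For reference: put_abs_lowcore()/get_abs_lowcore() replace the open-coded
mem_assign_absolute() pattern. The caller names a lowcore member and the
macro derives the absolute address and the size from struct lowcore itself,
so size mismatches can no longer slip in. Usage, as seen in the
machine_kexec.c hunk later in this patch (the read-back is a sketch):

	u64 info;

	put_abs_lowcore(vmcore_info, paddr_vmcoreinfo_note());	/* store */
	get_abs_lowcore(info, vmcore_info);			/* fetch back */
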
index 888a2f1c9ee3b6943e974e00c38783fb787a8c7f..24a54443c8652a1512f7ea3cf5a75f6324f29fe4 100644 (file)
@@ -78,7 +78,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
        typecheck(int, lp->lock);
        asm_inline volatile(
-               ALTERNATIVE("", ".long 0xb2fa0070", 49) /* NIAI 7 */
+               ALTERNATIVE("", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */
                "       sth     %1,%0\n"
                : "=R" (((unsigned short *) &lp->lock)[1])
                : "d" (0) : "cc", "memory");
index 275f4258fbd590778dfdf676e0469d69a6eb333b..f8500191993df0b81f971c9a1a52534a7de8c3a8 100644 (file)
@@ -46,7 +46,7 @@ struct stack_frame {
 };
 
 /*
- * Unlike current_stack_pointer() which simply returns current value of %r15
+ * Unlike current_stack_pointer which simply contains the current value of %r15
  * current_frame_address() returns function stack frame address, which matches
  * %r15 upon function invocation. It may differ from %r15 later if function
  * allocates stack for local variables or new stack frame to call other
index ad2c996e7e93dda9e7560256f6d825d15b3d87d0..fde7e6b1df482ae7a9bc1e1fecd59ec36b56cbbf 100644 (file)
        __diag_pop();                                                                   \
        static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 
-#endif /* _ASM_X86_SYSCALL_WRAPPER_H */
+#endif /* _ASM_S390_SYSCALL_WRAPPER_H */
index 5ebf534ef75338ba8edcd865149491ea46edd2ae..0bf06f1682d81363a07db9ea0c324c3400f74727 100644 (file)
@@ -4,6 +4,8 @@
 
 #include <linux/sched.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
+#include <linux/llist.h>
 #include <asm/ptrace.h>
 #include <asm/stacktrace.h>
 
@@ -36,10 +38,21 @@ struct unwind_state {
        struct pt_regs *regs;
        unsigned long sp, ip;
        int graph_idx;
+       struct llist_node *kr_cur;
        bool reliable;
        bool error;
 };
 
+/* Recover the return address modified by kretprobe and ftrace_graph. */
+static inline unsigned long unwind_recover_ret_addr(struct unwind_state *state,
+                                                   unsigned long ip)
+{
+       ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL);
+       if (is_kretprobe_trampoline(ip))
+               ip = kretprobe_find_ret_addr(state->task, (void *)state->sp, &state->kr_cur);
+       return ip;
+}
+
 void __unwind_start(struct unwind_state *state, struct task_struct *task,
                    struct pt_regs *regs, unsigned long first_frame);
 bool unwind_next_frame(struct unwind_state *state);
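
For reference: unwind_recover_ret_addr() first undoes any ftrace_graph
return-address rewrite and then, if the result is the kretprobe trampoline,
resolves the original return address; state->kr_cur is a cursor so
successive frames consume successive kretprobe instances. The unwind_bc.c
hunks later in this patch call it right after the stack pointer is known
(sketch of the call site):

	/* in unwind_next_frame() / __unwind_start(): */
	state->sp = sp;
	state->ip = unwind_recover_ret_addr(state, ip);
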
index 0ca572ced21b0e658a4b97b5095b8ed0fa239aed..8e8aaf48582e9a789b3d488881d6935d7ce4a621 100644 (file)
@@ -67,9 +67,5 @@ struct user {
   unsigned long magic;         /* To uniquely identify a core file */
   char u_comm[32];             /* User command that was responsible */
 };
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
 
 #endif /* _S390_USER_H */
index a601a518b569431483239d8440a6acf055cd77fc..59b69c8ab5e1f5ef37d9953ca189110662bf2754 100644 (file)
@@ -121,22 +121,22 @@ _LPP_OFFSET       = __LC_LPP
        .endm
 
        .macro BPOFF
-       ALTERNATIVE "", ".long 0xb2e8c000", 82
+       ALTERNATIVE "", ".insn rrf,0xb2e80000,0,0,12,0", 82
        .endm
 
        .macro BPON
-       ALTERNATIVE "", ".long 0xb2e8d000", 82
+       ALTERNATIVE "", ".insn rrf,0xb2e80000,0,0,13,0", 82
        .endm
 
        .macro BPENTER tif_ptr,tif_mask
-       ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
+       ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
                    "", 82
        .endm
 
        .macro BPEXIT tif_ptr,tif_mask
        TSTMSK  \tif_ptr,\tif_mask
-       ALTERNATIVE "jz .+8;  .long 0xb2e8c000", \
-                   "jnz .+8; .long 0xb2e8d000", 82
+       ALTERNATIVE "jz .+8;  .insn rrf,0xb2e80000,0,0,12,0", \
+                   "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
        .endm
 
        /*
index 28ae7df26c4ae5377e59bbd4a6f5e49442474d79..1cc85b8ff42e5d30a055f5fb1d286941f4b78f08 100644 (file)
@@ -1646,8 +1646,8 @@ static void dump_reipl_run(struct shutdown_trigger *trigger)
 
        csum = (__force unsigned int)
               csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
-       mem_assign_absolute(S390_lowcore.ipib, ipib);
-       mem_assign_absolute(S390_lowcore.ipib_checksum, csum);
+       put_abs_lowcore(ipib, ipib);
+       put_abs_lowcore(ipib_checksum, csum);
        dump_run(trigger);
 }
 
index e32c14fd128218f94220e6438146c32a6d14dac8..0032bdbe8e3fa1d7a7b2440a1ee096bb590b3dc4 100644 (file)
@@ -284,11 +284,11 @@ NOKPROBE_SYMBOL(pop_kprobe);
 
 void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
-       ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
-       ri->fp = NULL;
+       ri->ret_addr = (kprobe_opcode_t *)regs->gprs[14];
+       ri->fp = (void *)regs->gprs[15];
 
        /* Replace the return addr with trampoline addr */
-       regs->gprs[14] = (unsigned long) &__kretprobe_trampoline;
+       regs->gprs[14] = (unsigned long)&__kretprobe_trampoline;
 }
 NOKPROBE_SYMBOL(arch_prepare_kretprobe);
 
@@ -385,7 +385,7 @@ NOKPROBE_SYMBOL(arch_kretprobe_fixup_return);
  */
 void trampoline_probe_handler(struct pt_regs *regs)
 {
-       kretprobe_trampoline_handler(regs, NULL);
+       kretprobe_trampoline_handler(regs, (void *)regs->gprs[15]);
 }
 NOKPROBE_SYMBOL(trampoline_probe_handler);
 
index 088d57a3083f3173cef4597be2b05c3841a8483f..6ebf02e15c85812bf4a42ff4afa5d547f5d9171a 100644 (file)
@@ -54,7 +54,7 @@ static void __do_machine_kdump(void *image)
         * This need to be done *after* s390_reset_system set the
         * prefix register of this CPU to zero
         */
-       memcpy((void *) __LC_FPREGS_SAVE_AREA,
+       memcpy(absolute_pointer(__LC_FPREGS_SAVE_AREA),
               (void *)(prefix + __LC_FPREGS_SAVE_AREA), 512);
 
        __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
@@ -226,7 +226,7 @@ void arch_crash_save_vmcoreinfo(void)
        vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31);
        vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31);
        vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
-       mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
+       put_abs_lowcore(vmcore_info, paddr_vmcoreinfo_note());
 }
 
 void machine_shutdown(void)
index 6b5b64e67eee06fb1e2f93de934cdaf5a72c7712..1acc2e05d70f07bffebe98ea7605d0580596bf76 100644 (file)
@@ -63,7 +63,7 @@ void __init os_info_init(void)
        os_info.version_minor = OS_INFO_VERSION_MINOR;
        os_info.magic = OS_INFO_MAGIC;
        os_info.csum = os_info_csum(&os_info);
-       mem_assign_absolute(S390_lowcore.os_info, __pa(ptr));
+       put_abs_lowcore(os_info, __pa(ptr));
 }
 
 #ifdef CONFIG_CRASH_DUMP
index 7a74ea5f7531b9eee127e5273681da6496115e0b..aa0e0e7fc773e31901eef61e4f7dfc1f763b1f29 100644 (file)
@@ -283,6 +283,10 @@ static int __init setup_elf_platform(void)
        case 0x8562:
                strcpy(elf_platform, "z15");
                break;
+       case 0x3931:
+       case 0x3932:
+               strcpy(elf_platform, "z16");
+               break;
        }
        return 0;
 }
index 84e23fcc110636d5e5607c8a71af198194909d31..d860ac3009197eb3e975bfcd32efc6b5346c0614 100644 (file)
@@ -481,11 +481,11 @@ static void __init setup_lowcore_dat_off(void)
        lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
 
        /* Setup absolute zero lowcore */
-       mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
-       mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
-       mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
-       mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
-       mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);
+       put_abs_lowcore(restart_stack, lc->restart_stack);
+       put_abs_lowcore(restart_fn, lc->restart_fn);
+       put_abs_lowcore(restart_data, lc->restart_data);
+       put_abs_lowcore(restart_source, lc->restart_source);
+       put_abs_lowcore(restart_psw, lc->restart_psw);
 
        lc->spinlock_lockval = arch_spin_lockval(0);
        lc->spinlock_index = 0;
@@ -501,6 +501,7 @@ static void __init setup_lowcore_dat_off(void)
 static void __init setup_lowcore_dat_on(void)
 {
        struct lowcore *lc = lowcore_ptr[0];
+       int cr;
 
        __ctl_clear_bit(0, 28);
        S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
@@ -509,10 +510,10 @@ static void __init setup_lowcore_dat_on(void)
        S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
        __ctl_store(S390_lowcore.cregs_save_area, 0, 15);
        __ctl_set_bit(0, 28);
-       mem_assign_absolute(S390_lowcore.restart_flags, RESTART_FLAG_CTLREGS);
-       mem_assign_absolute(S390_lowcore.program_new_psw, lc->program_new_psw);
-       memcpy_absolute(&S390_lowcore.cregs_save_area, lc->cregs_save_area,
-                       sizeof(S390_lowcore.cregs_save_area));
+       put_abs_lowcore(restart_flags, RESTART_FLAG_CTLREGS);
+       put_abs_lowcore(program_new_psw, lc->program_new_psw);
+       for (cr = 0; cr < ARRAY_SIZE(lc->cregs_save_area); cr++)
+               put_abs_lowcore(cregs_save_area[cr], lc->cregs_save_area[cr]);
 }
 
 static struct resource code_resource = {
index 127da1850b064ed70ffbbd3945f392ebbe8890ba..30c91d56593374fcb8b43a1be5a5fe6eefa00a3c 100644 (file)
@@ -213,7 +213,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
        if (nmi_alloc_mcesa(&lc->mcesad))
                goto out;
        lowcore_ptr[cpu] = lc;
-       pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
+       pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, __pa(lc));
        return 0;
 
 out:
@@ -326,10 +326,17 @@ static void pcpu_delegate(struct pcpu *pcpu,
        /* Stop target cpu (if func returns this stops the current cpu). */
        pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
        /* Restart func on the target cpu and stop the current cpu. */
-       mem_assign_absolute(lc->restart_stack, stack);
-       mem_assign_absolute(lc->restart_fn, (unsigned long) func);
-       mem_assign_absolute(lc->restart_data, (unsigned long) data);
-       mem_assign_absolute(lc->restart_source, source_cpu);
+       if (lc) {
+               lc->restart_stack = stack;
+               lc->restart_fn = (unsigned long)func;
+               lc->restart_data = (unsigned long)data;
+               lc->restart_source = source_cpu;
+       } else {
+               put_abs_lowcore(restart_stack, stack);
+               put_abs_lowcore(restart_fn, (unsigned long)func);
+               put_abs_lowcore(restart_data, (unsigned long)data);
+               put_abs_lowcore(restart_source, source_cpu);
+       }
        __bpon();
        asm volatile(
                "0:     sigp    0,%0,%2 # sigp restart to target cpu\n"
@@ -570,39 +577,27 @@ static void smp_ctl_bit_callback(void *info)
 }
 
 static DEFINE_SPINLOCK(ctl_lock);
-static unsigned long ctlreg;
 
-/*
- * Set a bit in a control register of all cpus
- */
-void smp_ctl_set_bit(int cr, int bit)
+void smp_ctl_set_clear_bit(int cr, int bit, bool set)
 {
-       struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
-
-       spin_lock(&ctl_lock);
-       memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
-       __set_bit(bit, &ctlreg);
-       memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
-       spin_unlock(&ctl_lock);
-       on_each_cpu(smp_ctl_bit_callback, &parms, 1);
-}
-EXPORT_SYMBOL(smp_ctl_set_bit);
-
-/*
- * Clear a bit in a control register of all cpus
- */
-void smp_ctl_clear_bit(int cr, int bit)
-{
-       struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
+       struct ec_creg_mask_parms parms = { .cr = cr, };
+       u64 ctlreg;
 
+       if (set) {
+               parms.orval = 1UL << bit;
+               parms.andval = -1UL;
+       } else {
+               parms.orval = 0;
+               parms.andval = ~(1UL << bit);
+       }
        spin_lock(&ctl_lock);
-       memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
-       __clear_bit(bit, &ctlreg);
-       memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
+       get_abs_lowcore(ctlreg, cregs_save_area[cr]);
+       ctlreg = (ctlreg & parms.andval) | parms.orval;
+       put_abs_lowcore(cregs_save_area[cr], ctlreg);
        spin_unlock(&ctl_lock);
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
-EXPORT_SYMBOL(smp_ctl_clear_bit);
+EXPORT_SYMBOL(smp_ctl_set_clear_bit);
 
 #ifdef CONFIG_CRASH_DUMP
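
For reference: folding the set and clear paths into one function works
because both reduce to the same and/or update; only the masks differ. The
core update, rendered stand-alone (not part of the patch):

	/* set:   orval = 1UL << bit, andval = -1UL
	 * clear: orval = 0,          andval = ~(1UL << bit) */
	static unsigned long ctl_update(unsigned long ctlreg, int bit, bool set)
	{
		unsigned long orval = set ? 1UL << bit : 0;
		unsigned long andval = set ? -1UL : ~(1UL << bit);

		return (ctlreg & andval) | orval;
	}
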
 
index b98f25029b8e60adb08aae0ee8d51bee52c6f5d6..fb85e797946db64e6f7403a4a1a41a5976269010 100644 (file)
@@ -21,8 +21,7 @@ uapi: $(uapi-hdrs-y)
 
 
 # Create output directory if not already present
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 filechk_syshdr = $(CONFIG_SHELL) '$(systbl)' -H -a $(syshdr_abi_$(basetarget)) -f "$2" < $<
 
index 674c65019434e1b8fee354031943a0ba4ffbb794..1d2aa448d1031ce516bcf5afdec778e7a911d660 100644 (file)
@@ -141,10 +141,10 @@ static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc)
        do_trap(regs, SIGFPE, si_code, "floating point exception");
 }
 
-static void translation_exception(struct pt_regs *regs)
+static void translation_specification_exception(struct pt_regs *regs)
 {
        /* May never happen. */
-       panic("Translation exception");
+       panic("Translation-Specification Exception");
 }
 
 static void illegal_op(struct pt_regs *regs)
@@ -368,7 +368,7 @@ static void (*pgm_check_table[128])(struct pt_regs *regs) = {
        [0x0f]          = hfp_divide_exception,
        [0x10]          = do_dat_exception,
        [0x11]          = do_dat_exception,
-       [0x12]          = translation_exception,
+       [0x12]          = translation_specification_exception,
        [0x13]          = special_op_exception,
        [0x14]          = default_trap_handler,
        [0x15]          = operand_exception,
index 707fd99f6734d86ca1f037887f6be085b7366a3c..0ece156fdd7cc9f1c296727c2a702ac9f958c7c9 100644 (file)
@@ -64,8 +64,8 @@ bool unwind_next_frame(struct unwind_state *state)
                ip = READ_ONCE_NOCHECK(sf->gprs[8]);
                reliable = false;
                regs = NULL;
-               if (!__kernel_text_address(ip)) {
-                       /* skip bogus %r14 */
+               /* skip bogus %r14, or one that is the same as regs->psw.addr */
+               if (!__kernel_text_address(ip) || state->ip == unwind_recover_ret_addr(state, ip)) {
                        state->regs = NULL;
                        return unwind_next_frame(state);
                }
@@ -103,13 +103,11 @@ bool unwind_next_frame(struct unwind_state *state)
        if (sp & 0x7)
                goto out_err;
 
-       ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, (void *) sp);
-
        /* Update unwind state */
        state->sp = sp;
-       state->ip = ip;
        state->regs = regs;
        state->reliable = reliable;
+       state->ip = unwind_recover_ret_addr(state, ip);
        return true;
 
 out_err:
@@ -161,12 +159,10 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
                ip = READ_ONCE_NOCHECK(sf->gprs[8]);
        }
 
-       ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL);
-
        /* Update unwind state */
        state->sp = sp;
-       state->ip = ip;
        state->reliable = true;
+       state->ip = unwind_recover_ret_addr(state, ip);
 
        if (!first_frame)
                return;
index ab73d99483feaa5244f9c2bc288670d104aba328..156d1c25a3c1ec9a225669013153bf44c90d5363 100644 (file)
@@ -3462,7 +3462,7 @@ void exit_sie(struct kvm_vcpu *vcpu)
 /* Kick a guest cpu out of SIE to process a request synchronously */
 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
 {
-       kvm_make_request(req, vcpu);
+       __kvm_make_request(req, vcpu);
        kvm_s390_vcpu_request(vcpu);
 }
 
index 7f7c0d6af2ce9f9a6a04ee96864e2865262006c3..cc7c9599f43ee8dc8f292eabe57701de1a51dfa1 100644 (file)
@@ -137,12 +137,7 @@ static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
        /* Allocate variable storage */
        vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
        vlen += uv_info.guest_virt_base_stor_len;
-       /*
-        * The Create Secure Configuration Ultravisor Call does not support
-        * using large pages for the virtual memory area.
-        * This is a hardware limitation.
-        */
-       kvm->arch.pv.stor_var = vmalloc_no_huge(vlen);
+       kvm->arch.pv.stor_var = vzalloc(vlen);
        if (!kvm->arch.pv.stor_var)
                goto out_err;
        return 0;
index 692dc84cd19c87dade2551af2650421378954e62..5e7ea8b111e892ec24d82776a4abdae9e091bbee 100644 (file)
@@ -75,7 +75,7 @@ static inline int arch_load_niai4(int *lock)
        int owner;
 
        asm_inline volatile(
-               ALTERNATIVE("", ".long 0xb2fa0040", 49) /* NIAI 4 */
+               ALTERNATIVE("", ".insn rre,0xb2fa0000,4,0", 49) /* NIAI 4 */
                "       l       %0,%1\n"
                : "=d" (owner) : "Q" (*lock) : "memory");
        return owner;
@@ -86,7 +86,7 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
        int expected = old;
 
        asm_inline volatile(
-               ALTERNATIVE("", ".long 0xb2fa0080", 49) /* NIAI 8 */
+               ALTERNATIVE("", ".insn rre,0xb2fa0000,8,0", 49) /* NIAI 8 */
                "       cs      %0,%3,%1\n"
                : "=d" (old), "=Q" (*lock)
                : "0" (old), "d" (new), "Q" (*lock)
index c01f02887de46741b31a01ccbb03984d88d58275..5a053b393d5c5a05d1d17dbd638a5258780dde6c 100644 (file)
@@ -47,7 +47,7 @@ static void print_backtrace(char *bt)
 static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
                                unsigned long sp)
 {
-       int frame_count, prev_is_func2, seen_func2_func1;
+       int frame_count, prev_is_func2, seen_func2_func1, seen_kretprobe_trampoline;
        const int max_frames = 128;
        struct unwind_state state;
        size_t bt_pos = 0;
@@ -63,6 +63,7 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
        frame_count = 0;
        prev_is_func2 = 0;
        seen_func2_func1 = 0;
+       seen_kretprobe_trampoline = 0;
        unwind_for_each_frame(&state, task, regs, sp) {
                unsigned long addr = unwind_get_return_address(&state);
                char sym[KSYM_SYMBOL_LEN];
@@ -88,6 +89,8 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
                if (prev_is_func2 && str_has_prefix(sym, "unwindme_func1"))
                        seen_func2_func1 = 1;
                prev_is_func2 = str_has_prefix(sym, "unwindme_func2");
+               if (str_has_prefix(sym, "__kretprobe_trampoline+0x0/"))
+                       seen_kretprobe_trampoline = 1;
        }
 
        /* Check the results. */
@@ -103,6 +106,10 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
                kunit_err(current_test, "Maximum number of frames exceeded\n");
                ret = -EINVAL;
        }
+       if (seen_kretprobe_trampoline) {
+               kunit_err(current_test, "__kretprobe_trampoline+0x0 in unwinding results\n");
+               ret = -EINVAL;
+       }
        if (ret || force_bt)
                print_backtrace(bt);
        kfree(bt);
@@ -132,36 +139,50 @@ static struct unwindme *unwindme;
 #define UWM_PGM                        0x40    /* Unwind from program check handler */
 #define UWM_KPROBE_ON_FTRACE   0x80    /* Unwind from kprobe handler called via ftrace. */
 #define UWM_FTRACE             0x100   /* Unwind from ftrace handler. */
-#define UWM_KRETPROBE          0x200   /* Unwind kretprobe handlers. */
+#define UWM_KRETPROBE          0x200   /* Unwind through kretprobed function. */
+#define UWM_KRETPROBE_HANDLER  0x400   /* Unwind from kretprobe handler. */
 
-static __always_inline unsigned long get_psw_addr(void)
+static __always_inline struct pt_regs fake_pt_regs(void)
 {
-       unsigned long psw_addr;
+       struct pt_regs regs;
+
+       memset(&regs, 0, sizeof(regs));
+       regs.gprs[15] = current_stack_pointer;
 
        asm volatile(
                "basr   %[psw_addr],0\n"
-               : [psw_addr] "=d" (psw_addr));
-       return psw_addr;
+               : [psw_addr] "=d" (regs.psw.addr));
+       return regs;
 }
 
 static int kretprobe_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
        struct unwindme *u = unwindme;
 
+       if (!(u->flags & UWM_KRETPROBE_HANDLER))
+               return 0;
+
        u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? regs : NULL,
                             (u->flags & UWM_SP) ? u->sp : 0);
 
        return 0;
 }
 
-static noinline notrace void test_unwind_kretprobed_func(void)
+static noinline notrace int test_unwind_kretprobed_func(struct unwindme *u)
 {
-       asm volatile("  nop\n");
+       struct pt_regs regs;
+
+       if (!(u->flags & UWM_KRETPROBE))
+               return 0;
+
+       regs = fake_pt_regs();
+       return test_unwind(NULL, (u->flags & UWM_REGS) ? &regs : NULL,
+                          (u->flags & UWM_SP) ? u->sp : 0);
 }
 
-static noinline void test_unwind_kretprobed_func_caller(void)
+static noinline int test_unwind_kretprobed_func_caller(struct unwindme *u)
 {
-       test_unwind_kretprobed_func();
+       return test_unwind_kretprobed_func(u);
 }
 
 static int test_unwind_kretprobe(struct unwindme *u)
@@ -187,10 +208,12 @@ static int test_unwind_kretprobe(struct unwindme *u)
                return -EINVAL;
        }
 
-       test_unwind_kretprobed_func_caller();
+       ret = test_unwind_kretprobed_func_caller(u);
        unregister_kretprobe(&my_kretprobe);
        unwindme = NULL;
-       return u->ret;
+       if (u->flags & UWM_KRETPROBE_HANDLER)
+               ret = u->ret;
+       return ret;
 }
 
 static int kprobe_pre_handler(struct kprobe *p, struct pt_regs *regs)
@@ -304,16 +327,13 @@ static noinline int unwindme_func4(struct unwindme *u)
                return 0;
        } else if (u->flags & (UWM_PGM | UWM_KPROBE_ON_FTRACE)) {
                return test_unwind_kprobe(u);
-       } else if (u->flags & (UWM_KRETPROBE)) {
+       } else if (u->flags & (UWM_KRETPROBE | UWM_KRETPROBE_HANDLER)) {
                return test_unwind_kretprobe(u);
        } else if (u->flags & UWM_FTRACE) {
                return test_unwind_ftrace(u);
        } else {
-               struct pt_regs regs;
+               struct pt_regs regs = fake_pt_regs();
 
-               memset(&regs, 0, sizeof(regs));
-               regs.psw.addr = get_psw_addr();
-               regs.gprs[15] = current_stack_pointer();
                return test_unwind(NULL,
                                   (u->flags & UWM_REGS) ? &regs : NULL,
                                   (u->flags & UWM_SP) ? u->sp : 0);
@@ -452,6 +472,10 @@ static const struct test_params param_list[] = {
        TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_SP),
        TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_REGS),
        TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_SP | UWM_REGS),
+       TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER),
+       TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_SP),
+       TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_REGS),
+       TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_SP | UWM_REGS),
 };
 
 /*
index 792f8e0f217895324d906953bbb33673362b2f05..e563cb65c0c4c8d3cf76c36a4952291e2eab5234 100644 (file)
@@ -69,6 +69,7 @@ struct zpci_dev *get_zdev_by_fid(u32 fid)
        list_for_each_entry(tmp, &zpci_list, entry) {
                if (tmp->fid == fid) {
                        zdev = tmp;
+                       zpci_zdev_get(zdev);
                        break;
                }
        }
@@ -399,7 +400,7 @@ EXPORT_SYMBOL(pci_iounmap);
 static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
                    int size, u32 *val)
 {
-       struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
+       struct zpci_dev *zdev = zdev_from_bus(bus, devfn);
 
        return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
 }
@@ -407,7 +408,7 @@ static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
 static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
                     int size, u32 val)
 {
-       struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
+       struct zpci_dev *zdev = zdev_from_bus(bus, devfn);
 
        return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
 }
index e359d2686178b8575dedc4a07021a70cc078246d..e96c9860e0644b4d1dd144d5c0fccbdf484a60cb 100644 (file)
@@ -19,7 +19,8 @@ void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error);
 void zpci_release_device(struct kref *kref);
 static inline void zpci_zdev_put(struct zpci_dev *zdev)
 {
-       kref_put(&zdev->kref, zpci_release_device);
+       if (zdev)
+               kref_put(&zdev->kref, zpci_release_device);
 }
 
 static inline void zpci_zdev_get(struct zpci_dev *zdev)
@@ -32,8 +33,8 @@ void zpci_free_domain(int domain);
 int zpci_setup_bus_resources(struct zpci_dev *zdev,
                             struct list_head *resources);
 
-static inline struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus,
-                                              unsigned int devfn)
+static inline struct zpci_dev *zdev_from_bus(struct pci_bus *bus,
+                                            unsigned int devfn)
 {
        struct zpci_bus *zbus = bus->sysdata;
 
index 63f3e057c168f3067663f4a1159200326d569979..1057d7af4a551b7498a0a37f3856faa1ac52cd99 100644 (file)
@@ -23,6 +23,8 @@
 #include <asm/clp.h>
 #include <uapi/asm/clp.h>
 
+#include "pci_bus.h"
+
 bool zpci_unique_uid;
 
 void update_uid_checking(bool new)
@@ -404,8 +406,11 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
                return;
 
        zdev = get_zdev_by_fid(entry->fid);
-       if (!zdev)
-               zpci_create_device(entry->fid, entry->fh, entry->config_state);
+       if (zdev) {
+               zpci_zdev_put(zdev);
+               return;
+       }
+       zpci_create_device(entry->fid, entry->fh, entry->config_state);
 }
 
 int clp_scan_pci_devices(void)
index 2e3e5b2789257ea0735725659ead4e075a311d49..ea9db5cea64e30a8b7133b57b059f9375608a82e 100644 (file)
@@ -269,7 +269,7 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
               pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
 
        if (!pdev)
-               return;
+               goto no_pdev;
 
        switch (ccdf->pec) {
        case 0x003a: /* Service Action or Error Recovery Successful */
@@ -286,6 +286,8 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
                break;
        }
        pci_dev_put(pdev);
+no_pdev:
+       zpci_zdev_put(zdev);
 }
 
 void zpci_event_error(void *data)
@@ -314,6 +316,7 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
 static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 {
        struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+       bool existing_zdev = !!zdev;
        enum zpci_state state;
 
        zpci_dbg(3, "avl fid:%x, fh:%x, pec:%x\n",
@@ -378,6 +381,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
        default:
                break;
        }
+       if (existing_zdev)
+               zpci_zdev_put(zdev);
 }
 
 void zpci_event_availability(void *data)
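
For reference: with zpci_zdev_get() folded into the lookup,
get_zdev_by_fid() now returns a counted reference, and every caller,
including the early-return and no_pdev paths patched above, has to drop it;
zpci_zdev_put() accepting NULL keeps those error paths simple. The resulting
caller discipline (sketch):

	struct zpci_dev *zdev = get_zdev_by_fid(fid);

	if (!zdev)
		return;
	/* ... use zdev; it cannot be released underneath us ... */
	zpci_zdev_put(zdev);	/* drop the reference the lookup took */
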
index 7dfd3f6461e640aaedf08cdcf76a645597c80199..12ea0f3f441991fff47370e5841c5156a4cdc95d 100644 (file)
@@ -52,10 +52,4 @@ struct user {
        char            u_comm[32];             /* user command name */
 };
 
-#define NBPG                   PAGE_SIZE
-#define UPAGES                 1
-#define HOST_TEXT_START_ADDR   (u.start_code)
-#define HOST_DATA_START_ADDR   (u.start_data)
-#define HOST_STACK_END_ADDR    (u.start_stack + u.u_ssize * NBPG)
-
 #endif /* __ASM_SH_USER_H */
index 6713c65a25e15cfe96824cbcdba6bb8f222ef274..b265e4bc16c2e0bc52c63819fda704ebda00de31 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index d63f18dd058d8cc81c18b37e82dd26fa8700e7b9..8440c16dfb225b3f30d668c3c855d1c015fbd259 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 6ead1e24045765bb77826662eb278eb29d48fc96..8ca67a69268306c8c3ca17857d5efcd9c380e0e6 100644 (file)
@@ -224,7 +224,7 @@ void mconsole_go(struct mc_request *req)
 
 void mconsole_stop(struct mc_request *req)
 {
-       deactivate_fd(req->originating_fd, MCONSOLE_IRQ);
+       block_signals();
        os_set_fd_block(req->originating_fd, 1);
        mconsole_reply(req, "stopped", 0, 0);
        for (;;) {
@@ -247,6 +247,7 @@ void mconsole_stop(struct mc_request *req)
        }
        os_set_fd_block(req->originating_fd, 0);
        mconsole_reply(req, "", 0, 0);
+       unblock_signals();
 }
 
 static DEFINE_SPINLOCK(mc_devices_lock);
index 5b5b64cb1071335603feb1d57b6570d08ec04294..3c62ae81df62cd03fb611bac1d9e67470619f766 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <stdio.h>
 #include <stdlib.h>
+#include <string.h>
 #include <errno.h>
 #include <termios.h>
 #include <unistd.h>
@@ -167,14 +168,29 @@ static void port_pre_exec(void *arg)
 int port_connection(int fd, int *socket, int *pid_out)
 {
        int new, err;
-       char *argv[] = { "/usr/sbin/in.telnetd", "-L",
+       char *env;
+       char *argv[] = { "in.telnetd", "-L",
                         OS_LIB_PATH "/uml/port-helper", NULL };
        struct port_pre_exec_data data;
 
+       if ((env = getenv("UML_PORT_HELPER")))
+               argv[2] = env;
+
        new = accept(fd, NULL, 0);
        if (new < 0)
                return -errno;
 
+       err = os_access(argv[2], X_OK);
+       if (err < 0) {
+               printk(UM_KERN_ERR "port_connection : error accessing port-helper "
+                      "executable at %s: %s\n", argv[2], strerror(-err));
+               if (env == NULL)
+                       printk(UM_KERN_ERR "Set UML_PORT_HELPER environment "
+                               "variable to path to uml-utilities port-helper "
+                               "binary\n");
+               goto out_close;
+       }
+
        err = os_pipe(socket, 0, 0);
        if (err < 0)
                goto out_close;
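
For reference: this hunk makes the helper path overridable through the
UML_PORT_HELPER environment variable and verifies the binary is executable
before forking, turning a silent exec failure into a clear message. The
lookup order, reduced to plain libc (a sketch, not the patch's exact code):

	#include <stdlib.h>
	#include <unistd.h>

	/* An UML_PORT_HELPER override wins over the compiled-in default;
	 * either way the binary must pass an X_OK access check. */
	static const char *pick_port_helper(const char *builtin)
	{
		const char *env = getenv("UML_PORT_HELPER");
		const char *path = env ? env : builtin;

		return access(path, X_OK) == 0 ? path : NULL;
	}
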
index 69d2d0049a613de2fbb3c8527857ad7906fc19ef..b03269faef714764748b16f4645e9496cb555633 100644 (file)
@@ -1526,13 +1526,19 @@ static void do_io(struct io_thread_req *req, struct io_desc *desc)
                        }
                        break;
                case REQ_OP_DISCARD:
-               case REQ_OP_WRITE_ZEROES:
                        n = os_falloc_punch(req->fds[bit], off, len);
                        if (n) {
                                req->error = map_error(-n);
                                return;
                        }
                        break;
+               case REQ_OP_WRITE_ZEROES:
+                       n = os_falloc_zeroes(req->fds[bit], off, len);
+                       if (n) {
+                               req->error = map_error(-n);
+                               return;
+                       }
+                       break;
                default:
                        WARN_ON_ONCE(1);
                        req->error = BLK_STS_NOTSUPP;
index 4fc1a5d70dcf31fd11a8a479112fbef662ea4921..1d6f6a66766c685881c01790c60f27b8ea7bdb67 100644 (file)
@@ -67,6 +67,7 @@ static LIST_HEAD(vector_devices);
 static int driver_registered;
 
 static void vector_eth_configure(int n, struct arglist *def);
+static int vector_mmsg_rx(struct vector_private *vp, int budget);
 
 /* Argument accessors to set variables (and/or set default values)
  * mtu, buffer sizing, default headroom, etc
@@ -77,7 +78,6 @@ static void vector_eth_configure(int n, struct arglist *def);
 #define DEFAULT_VECTOR_SIZE 64
 #define TX_SMALL_PACKET 128
 #define MAX_IOV_SIZE (MAX_SKB_FRAGS + 1)
-#define MAX_ITERATIONS 64
 
 static const struct {
        const char string[ETH_GSTRING_LEN];
@@ -458,7 +458,6 @@ static int vector_send(struct vector_queue *qi)
                                        vp->estats.tx_queue_running_average =
                                                (vp->estats.tx_queue_running_average + result) >> 1;
                                }
-                               netif_trans_update(qi->dev);
                                netif_wake_queue(qi->dev);
                                /* if TX is busy, break out of the send loop,
                                 *  poll write IRQ will reschedule xmit for us
@@ -470,8 +469,6 @@ static int vector_send(struct vector_queue *qi)
                        }
                }
                spin_unlock(&qi->head_lock);
-       } else {
-               tasklet_schedule(&vp->tx_poll);
        }
        return queue_depth;
 }
@@ -608,7 +605,7 @@ out_fail:
 
 /*
  * We do not use the RX queue as a proper wraparound queue for now
- * This is not necessary because the consumption via netif_rx()
+ * This is not necessary because the consumption via napi_gro_receive()
  * happens in-line. While we can try using the return code of
  * netif_rx() for flow control there are no drivers doing this today.
  * For this RX specific use we ignore the tail/head locks and
@@ -896,7 +893,7 @@ static int vector_legacy_rx(struct vector_private *vp)
                        skb->protocol = eth_type_trans(skb, skb->dev);
                        vp->dev->stats.rx_bytes += skb->len;
                        vp->dev->stats.rx_packets++;
-                       netif_rx(skb);
+                       napi_gro_receive(&vp->napi, skb);
                } else {
                        dev_kfree_skb_irq(skb);
                }
@@ -955,7 +952,7 @@ drop:
  * mmsg vector matched to an skb vector which we prepared earlier.
  */
 
-static int vector_mmsg_rx(struct vector_private *vp)
+static int vector_mmsg_rx(struct vector_private *vp, int budget)
 {
        int packet_count, i;
        struct vector_queue *qi = vp->rx_queue;
@@ -972,6 +969,9 @@ static int vector_mmsg_rx(struct vector_private *vp)
 
        /* Fire the Lazy Gun - get as many packets as we can in one go. */
 
+       if (budget > qi->max_depth)
+               budget = qi->max_depth;
+
        packet_count = uml_vector_recvmmsg(
                vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);
 
@@ -1021,7 +1021,7 @@ static int vector_mmsg_rx(struct vector_private *vp)
                         */
                        vp->dev->stats.rx_bytes += skb->len;
                        vp->dev->stats.rx_packets++;
-                       netif_rx(skb);
+                       napi_gro_receive(&vp->napi, skb);
                } else {
                        /* Overlay header too short to do anything - discard.
                         * We can actually keep this skb and reuse it,
@@ -1044,23 +1044,6 @@ static int vector_mmsg_rx(struct vector_private *vp)
        return packet_count;
 }
 
-static void vector_rx(struct vector_private *vp)
-{
-       int err;
-       int iter = 0;
-
-       if ((vp->options & VECTOR_RX) > 0)
-               while (((err = vector_mmsg_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
-                       iter++;
-       else
-               while (((err = vector_legacy_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
-                       iter++;
-       if ((err != 0) && net_ratelimit())
-               netdev_err(vp->dev, "vector_rx: error(%d)\n", err);
-       if (iter == MAX_ITERATIONS)
-               netdev_err(vp->dev, "vector_rx: device stuck, remote end may have closed the connection\n");
-}
-
 static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct vector_private *vp = netdev_priv(dev);
@@ -1085,25 +1068,15 @@ static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
        netdev_sent_queue(vp->dev, skb->len);
        queue_depth = vector_enqueue(vp->tx_queue, skb);
 
-       /* if the device queue is full, stop the upper layers and
-        * flush it.
-        */
-
-       if (queue_depth >= vp->tx_queue->max_depth - 1) {
-               vp->estats.tx_kicks++;
-               netif_stop_queue(dev);
-               vector_send(vp->tx_queue);
-               return NETDEV_TX_OK;
-       }
-       if (netdev_xmit_more()) {
+       if (queue_depth < vp->tx_queue->max_depth && netdev_xmit_more()) {
                mod_timer(&vp->tl, vp->coalesce);
                return NETDEV_TX_OK;
+       } else {
+               queue_depth = vector_send(vp->tx_queue);
+               if (queue_depth > 0)
+                       napi_schedule(&vp->napi);
        }
-       if (skb->len < TX_SMALL_PACKET) {
-               vp->estats.tx_kicks++;
-               vector_send(vp->tx_queue);
-       } else
-               tasklet_schedule(&vp->tx_poll);
+
        return NETDEV_TX_OK;
 }
 
@@ -1114,7 +1087,7 @@ static irqreturn_t vector_rx_interrupt(int irq, void *dev_id)
 
        if (!netif_running(dev))
                return IRQ_NONE;
-       vector_rx(vp);
+       napi_schedule(&vp->napi);
        return IRQ_HANDLED;
 
 }
@@ -1133,8 +1106,7 @@ static irqreturn_t vector_tx_interrupt(int irq, void *dev_id)
         * tweaking the IRQ mask less costly
         */
 
-       if (vp->in_write_poll)
-               tasklet_schedule(&vp->tx_poll);
+       napi_schedule(&vp->napi);
        return IRQ_HANDLED;
 
 }
@@ -1161,7 +1133,8 @@ static int vector_net_close(struct net_device *dev)
                um_free_irq(vp->tx_irq, dev);
                vp->tx_irq = 0;
        }
-       tasklet_kill(&vp->tx_poll);
+       napi_disable(&vp->napi);
+       netif_napi_del(&vp->napi);
        if (vp->fds->rx_fd > 0) {
                if (vp->bpf)
                        uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
@@ -1193,15 +1166,32 @@ static int vector_net_close(struct net_device *dev)
        return 0;
 }
 
-/* TX tasklet */
-
-static void vector_tx_poll(struct tasklet_struct *t)
+static int vector_poll(struct napi_struct *napi, int budget)
 {
-       struct vector_private *vp = from_tasklet(vp, t, tx_poll);
+       struct vector_private *vp = container_of(napi, struct vector_private, napi);
+       int work_done = 0;
+       int err;
+       bool tx_enqueued = false;
 
-       vp->estats.tx_kicks++;
-       vector_send(vp->tx_queue);
+       if ((vp->options & VECTOR_TX) != 0)
+               tx_enqueued = (vector_send(vp->tx_queue) > 0);
+       if ((vp->options & VECTOR_RX) > 0)
+               err = vector_mmsg_rx(vp, budget);
+       else {
+               err = vector_legacy_rx(vp);
+               if (err > 0)
+                       err = 1;
+       }
+       if (err > 0)
+               work_done += err;
+
+       if (tx_enqueued || err > 0)
+               napi_schedule(napi);
+       if (work_done < budget)
+               napi_complete_done(napi, work_done);
+       return work_done;
 }
+
 static void vector_reset_tx(struct work_struct *work)
 {
        struct vector_private *vp =
@@ -1265,6 +1255,9 @@ static int vector_net_open(struct net_device *dev)
                        goto out_close;
        }
 
+       netif_napi_add(vp->dev, &vp->napi, vector_poll, get_depth(vp->parsed));
+       napi_enable(&vp->napi);
+
        /* READ IRQ */
        err = um_request_irq(
                irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd,
@@ -1306,15 +1299,15 @@ static int vector_net_open(struct net_device *dev)
                uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
 
        netif_start_queue(dev);
+       vector_reset_stats(vp);
 
        /* clear buffer - it can happen that the host side of the interface
         * is full when we get here. In this case, new data is never queued,
         * SIGIOs never arrive, and the net never works.
         */
 
-       vector_rx(vp);
+       napi_schedule(&vp->napi);
 
-       vector_reset_stats(vp);
        vdevice = find_device(vp->unit);
        vdevice->opened = 1;
 
@@ -1543,15 +1536,16 @@ static const struct net_device_ops vector_netdev_ops = {
 #endif
 };
 
-
 static void vector_timer_expire(struct timer_list *t)
 {
        struct vector_private *vp = from_timer(vp, t, tl);
 
        vp->estats.tx_kicks++;
-       vector_send(vp->tx_queue);
+       napi_schedule(&vp->napi);
 }
 
+
+
 static void vector_eth_configure(
                int n,
                struct arglist *def
@@ -1634,7 +1628,6 @@ static void vector_eth_configure(
        });
 
        dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
-       tasklet_setup(&vp->tx_poll, vector_tx_poll);
        INIT_WORK(&vp->reset_tx, vector_reset_tx);
 
        timer_setup(&vp->tl, vector_timer_expire, 0);
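
For reference: the driver now follows the standard NAPI contract: the
interrupt handlers and the coalescing timer only call napi_schedule(), all
RX/TX completion work runs in the poll callback under a budget, and
napi_complete_done() re-arms the interrupt path only when the budget was not
exhausted. A minimal skeleton of that contract (example_rx() is a
hypothetical stand-in for the driver's real consume functions):

	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct vector_private *vp =
			container_of(napi, struct vector_private, napi);
		int work_done = example_rx(vp, budget);	/* hypothetical */

		if (work_done < budget)
			napi_complete_done(napi, work_done);
		return work_done;
	}

	static irqreturn_t example_irq(int irq, void *dev_id)
	{
		struct net_device *dev = dev_id;
		struct vector_private *vp = netdev_priv(dev);

		napi_schedule(&vp->napi);	/* defer all work to the poll callback */
		return IRQ_HANDLED;
	}
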
index 8fff93a75a927e0d69cd8528ada8d8616ac78151..2a1fa8e0f3e149c6f5617251d0bf8ef51975fcd1 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/ctype.h>
 #include <linux/workqueue.h>
 #include <linux/interrupt.h>
+
 #include "vector_user.h"
 
 /* Queue structure specially adapted for multiple enqueue/dequeue
@@ -72,6 +73,7 @@ struct vector_private {
        struct list_head list;
        spinlock_t lock;
        struct net_device *dev;
+       struct napi_struct              napi    ____cacheline_aligned;
 
        int unit;
 
@@ -115,7 +117,6 @@ struct vector_private {
 
        spinlock_t stats_lock;
 
-       struct tasklet_struct tx_poll;
        bool rexmit_scheduled;
        bool opened;
        bool in_write_poll;
index e4ffeb9a1fa4d6a9748cf0aa4d3176230234a361..c650e428432b57b98223349df49c26368a4d59f0 100644 (file)
@@ -771,7 +771,7 @@ int uml_vector_detach_bpf(int fd, void *bpf)
                printk(KERN_ERR BPF_DETACH_FAIL, prog->len, prog->filter, fd, -errno);
        return err;
 }
-void *uml_vector_default_bpf(void *mac)
+void *uml_vector_default_bpf(const void *mac)
 {
        struct sock_filter *bpf;
        uint32_t *mac1 = (uint32_t *)(mac + 2);
index d29d5fdd98fac4ef7118e281ef852b58d234823d..3a73d17a0161dbe342241000f7ccc4d685e38e1b 100644 (file)
@@ -97,7 +97,7 @@ extern int uml_vector_recvmmsg(
        unsigned int vlen,
        unsigned int flags
 );
-extern void *uml_vector_default_bpf(void *mac);
+extern void *uml_vector_default_bpf(const void *mac);
 extern void *uml_vector_user_bpf(char *filename);
 extern int uml_vector_attach_bpf(int fd, void *bpf);
 extern int uml_vector_detach_bpf(int fd, void *bpf);
index b08bd29662538de0318894ee871270f448a9c41c..f1f3f52f1e9ccc280e7fecf4916fe0a090063f04 100644 (file)
@@ -24,7 +24,6 @@ generic-y += softirq_stack.h
 generic-y += switch_to.h
 generic-y += topology.h
 generic-y += trace_clock.h
-generic-y += word-at-a-time.h
 generic-y += kprobes.h
 generic-y += mm_hooks.h
 generic-y += vga.h
index f512704a9ec7b99b7435a0d6e7009c2e07828f19..22b39de73c246905680cf502be5377ede69af962 100644 (file)
@@ -4,8 +4,10 @@
 
 #ifdef CONFIG_64BIT
 #undef CONFIG_X86_32
+#define TT_CPU_INF_XOR_DEFAULT (AVX_SELECT(&xor_block_sse_pf64))
 #else
 #define CONFIG_X86_32 1
+#define TT_CPU_INF_XOR_DEFAULT (AVX_SELECT(&xor_block_8regs))
 #endif
 
 #include <asm/cpufeature.h>
@@ -16,7 +18,7 @@
 #undef XOR_SELECT_TEMPLATE
 /* pick an arbitrary one - measuring isn't possible with inf-cpu */
 #define XOR_SELECT_TEMPLATE(x) \
-       (time_travel_mode == TT_MODE_INFCPU ? &xor_block_8regs : NULL)
+       (time_travel_mode == TT_MODE_INFCPU ? TT_CPU_INF_XOR_DEFAULT : x)
 #endif
 
 #endif
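
For reference: XOR_SELECT_TEMPLATE() is consulted by the generic xor
benchmark. Under normal time the measured winner passes through unchanged;
under infinite-CPU time travel, where timing runs are meaningless, a fixed
template per word size is forced. A hedged sketch of the consumer side (the
exact call site lives in crypto/xor.c and is assumed here, not shown in this
diff):

	struct xor_block_template *fastest = XOR_SELECT_TEMPLATE(NULL);

	if (!fastest)
		fastest = calibrate_by_benchmark();	/* hypothetical name */
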
index 00214059d9ecae5d9b28af4dbdb0c6a3e15eba46..fafde1d5416edf8f31c84b1b39842b280552bef1 100644 (file)
@@ -168,6 +168,7 @@ extern unsigned os_major(unsigned long long dev);
 extern unsigned os_minor(unsigned long long dev);
 extern unsigned long long os_makedev(unsigned major, unsigned minor);
 extern int os_falloc_punch(int fd, unsigned long long offset, int count);
+extern int os_falloc_zeroes(int fd, unsigned long long offset, int count);
 extern int os_eventfd(unsigned int initval, int flags);
 extern int os_sendmsg_fds(int fd, const void *buf, unsigned int len,
                          const int *fds, unsigned int fds_num);
index ca69d72025f39d401eddbbef983ba7a785925aba..484141b06938ffcd222a7c40055acd07155b04a1 100644 (file)
@@ -25,8 +25,8 @@ void uml_dtb_init(void)
                return;
        }
 
-       unflatten_device_tree();
        early_init_fdt_scan_reserved_mem();
+       unflatten_device_tree();
 }
 
 static int __init uml_dtb_setup(char *line, int *add)
index e4421dbc4c36c70adbc66eb997f6a42c05548cf4..fc4450db59bd444b6ac92fd23db270d1461a0e39 100644 (file)
@@ -625,6 +625,15 @@ int os_falloc_punch(int fd, unsigned long long offset, int len)
        return n;
 }
 
+int os_falloc_zeroes(int fd, unsigned long long offset, int len)
+{
+       int n = fallocate(fd, FALLOC_FL_ZERO_RANGE|FALLOC_FL_KEEP_SIZE, offset, len);
+
+       if (n < 0)
+               return -errno;
+       return n;
+}
+
 int os_eventfd(unsigned int initval, int flags)
 {
        int fd = eventfd(initval, flags);
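
For reference: FALLOC_FL_ZERO_RANGE together with FALLOC_FL_KEEP_SIZE zeroes
the byte range in place without growing the file, which matches
REQ_OP_WRITE_ZEROES semantics, while os_falloc_punch() keeps using hole
punching for discards. The host-side call, as a stand-alone sketch:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <errno.h>

	/* Zero len bytes at offset without changing the file size. */
	static int zero_range(int fd, long long offset, int len)
	{
		if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
			      offset, len) < 0)
			return -errno;
		return 0;
	}
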
index 32e88baf18dd47efa72e2878fa8a698913894f22..b459745f52e248063e44909bb5705cbe05932da1 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <stdlib.h>
+#include <string.h>
 #include <unistd.h>
 #include <errno.h>
 #include <sched.h>
@@ -99,6 +100,10 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
                CATCH_EINTR(waitpid(pid, NULL, __WALL));
        }
 
+       if (ret < 0)
+               printk(UM_KERN_ERR "run_helper : failed to exec %s on host: %s\n",
+                      argv[0], strerror(-ret));
+
 out_free2:
        kfree(data.buf);
 out_close:
index 6c5041c5560b28a16cac11bdfa7a0403b31528b0..4d5591d96d8c30f847055454245d0bb9b5779602 100644 (file)
 
 static timer_t event_high_res_timer = 0;
 
-static inline long long timeval_to_ns(const struct timeval *tv)
-{
-       return ((long long) tv->tv_sec * UM_NSEC_PER_SEC) +
-               tv->tv_usec * UM_NSEC_PER_USEC;
-}
-
 static inline long long timespec_to_ns(const struct timespec *ts)
 {
        return ((long long) ts->tv_sec * UM_NSEC_PER_SEC) + ts->tv_nsec;
index ff45a27fc29ceb6a47ff51d0b507b1a5a3f59286..b0142e01002e3dd4fc3299f23da6ce2e6aaa3997 100644 (file)
@@ -122,7 +122,6 @@ config X86
        select ARCH_WANT_GENERAL_HUGETLB
        select ARCH_WANT_HUGE_PMD_SHARE
        select ARCH_WANT_LD_ORPHAN_WARN
-       select ARCH_WANTS_RT_DELAYED_SIGNALS
        select ARCH_WANTS_THP_SWAP              if X86_64
        select ARCH_HAS_PARANOID_L1D_FLUSH
        select BUILDTIME_TABLE_SORT
index 71124cf8630c75a027261153389d738f027d7f0e..98a4852ed6a09537be587cd3e26a3153c3ff5ce7 100644 (file)
@@ -1,5 +1,7 @@
+CONFIG_WERROR=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_USELIB=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -11,23 +13,30 @@ CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_LOG_BUF_SHIFT=18
 CONFIG_CGROUPS=y
+CONFIG_BLK_CGROUP=y
 CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
 CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_MISC=y
+CONFIG_CGROUP_DEBUG=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
-# CONFIG_64BIT is not set
 CONFIG_SMP=y
-CONFIG_X86_GENERIC=y
-CONFIG_HPET_TIMER=y
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PARAVIRT=y
+CONFIG_NR_CPUS=8
 CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
-CONFIG_X86_REBOOTFIXUPS=y
 CONFIG_MICROCODE_AMD=y
 CONFIG_X86_MSR=y
 CONFIG_X86_CPUID=y
-CONFIG_HIGHPTE=y
 CONFIG_X86_CHECK_BIOS_CORRUPTION=y
 # CONFIG_MTRR_SANITIZER is not set
 CONFIG_EFI=y
@@ -43,12 +52,15 @@ CONFIG_ACPI_BGRT=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_X86_ACPI_CPUFREQ=y
-CONFIG_EFI_VARS=y
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
+CONFIG_COMPAT_32BIT_TIME=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_BLK_CGROUP_IOLATENCY=y
+CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_CGROUP_IOPRIO=y
 CONFIG_BINFMT_MISC=y
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -103,12 +115,16 @@ CONFIG_IP6_NF_FILTER=y
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
 CONFIG_NET_SCHED=y
+CONFIG_NET_CLS_CGROUP=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_CLS_ACT=y
+CONFIG_CGROUP_NET_PRIO=y
 CONFIG_CFG80211=y
 CONFIG_MAC80211=y
 CONFIG_MAC80211_LEDS=y
 CONFIG_RFKILL=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
 CONFIG_PCI=y
 CONFIG_PCIEPORTBUS=y
 CONFIG_PCI_MSI=y
@@ -119,13 +135,16 @@ CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_DEBUG_DEVRES=y
 CONFIG_CONNECTOR=y
+CONFIG_EFI_VARS=y
+CONFIG_EFI_CAPSULE_LOADER=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_CHR_DEV_SG=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_SPI_ATTRS=y
-# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_SCSI_VIRTIO=y
 CONFIG_ATA=y
 CONFIG_SATA_AHCI=y
 CONFIG_ATA_PIIX=y
@@ -143,6 +162,7 @@ CONFIG_MACINTOSH_DRIVERS=y
 CONFIG_MAC_EMUMOUSEBTN=y
 CONFIG_NETDEVICES=y
 CONFIG_NETCONSOLE=y
+CONFIG_VIRTIO_NET=y
 CONFIG_BNX2=y
 CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
@@ -170,6 +190,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
 CONFIG_SERIAL_8250_DETECT_IRQ=y
 CONFIG_SERIAL_8250_RSA=y
 CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_NVRAM=y
 CONFIG_HPET=y
@@ -181,12 +202,7 @@ CONFIG_AGP_AMD64=y
 CONFIG_AGP_INTEL=y
 CONFIG_DRM=y
 CONFIG_DRM_I915=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-CONFIG_FB_EFI=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_DRM_VIRTIO_GPU=y
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_HRTIMER=y
@@ -219,6 +235,8 @@ CONFIG_USB_STORAGE=y
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_HCTOSYS is not set
 CONFIG_DMADEVICES=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_INPUT=y
 CONFIG_EEEPC_LAPTOP=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -240,6 +258,7 @@ CONFIG_NFS_FS=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NFS_V4=y
 CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
 CONFIG_NLS_DEFAULT="utf8"
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=y
@@ -251,14 +270,15 @@ CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_DISABLE=y
 CONFIG_PRINTK_TIME=y
+CONFIG_FRAME_WARN=1024
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_STACK_USAGE=y
-CONFIG_DEBUG_STACKOVERFLOW=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_SCHEDSTATS=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
 CONFIG_EARLY_PRINTK_DBGP=y
 CONFIG_DEBUG_BOOT_PARAMS=y
-CONFIG_KALLSYMS_ALL=y
+CONFIG_UNWINDER_FRAME_POINTER=y
+# CONFIG_64BIT is not set
index 92b1169ec90b20a9326ef6ed45f4dd8f60b0600a..69784505a7a85cc6f1cf75a0a7ca26e433426c2d 100644 (file)
@@ -1,3 +1,4 @@
+CONFIG_WERROR=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
@@ -11,14 +12,25 @@ CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_LOG_BUF_SHIFT=18
 CONFIG_CGROUPS=y
+CONFIG_BLK_CGROUP=y
 CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
 CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_MISC=y
+CONFIG_CGROUP_DEBUG=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_SMP=y
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PARAVIRT=y
 CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
 CONFIG_MICROCODE_AMD=y
 CONFIG_X86_MSR=y
@@ -41,12 +53,14 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_X86_ACPI_CPUFREQ=y
 CONFIG_IA32_EMULATION=y
-CONFIG_EFI_VARS=y
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_BLK_CGROUP_IOLATENCY=y
+CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_CGROUP_IOPRIO=y
 CONFIG_BINFMT_MISC=y
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -101,12 +115,16 @@ CONFIG_IP6_NF_FILTER=y
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
 CONFIG_NET_SCHED=y
+CONFIG_NET_CLS_CGROUP=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_CLS_ACT=y
+CONFIG_CGROUP_NET_PRIO=y
 CONFIG_CFG80211=y
 CONFIG_MAC80211=y
 CONFIG_MAC80211_LEDS=y
 CONFIG_RFKILL=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
 CONFIG_PCI=y
 CONFIG_PCIEPORTBUS=y
 CONFIG_HOTPLUG_PCI=y
@@ -116,13 +134,15 @@ CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_DEBUG_DEVRES=y
 CONFIG_CONNECTOR=y
+CONFIG_EFI_VARS=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_CHR_DEV_SG=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_SPI_ATTRS=y
-# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_SCSI_VIRTIO=y
 CONFIG_ATA=y
 CONFIG_SATA_AHCI=y
 CONFIG_ATA_PIIX=y
@@ -138,6 +158,7 @@ CONFIG_MACINTOSH_DRIVERS=y
 CONFIG_MAC_EMUMOUSEBTN=y
 CONFIG_NETDEVICES=y
 CONFIG_NETCONSOLE=y
+CONFIG_VIRTIO_NET=y
 CONFIG_TIGON3=y
 CONFIG_NET_TULIP=y
 CONFIG_E100=y
@@ -162,6 +183,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
 CONFIG_SERIAL_8250_DETECT_IRQ=y
 CONFIG_SERIAL_8250_RSA=y
 CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM=y
 # CONFIG_HW_RANDOM_INTEL is not set
 # CONFIG_HW_RANDOM_AMD is not set
@@ -175,12 +197,7 @@ CONFIG_AGP_AMD64=y
 CONFIG_AGP_INTEL=y
 CONFIG_DRM=y
 CONFIG_DRM_I915=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-CONFIG_FB_EFI=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_DRM_VIRTIO_GPU=y
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_HRTIMER=y
@@ -213,6 +230,8 @@ CONFIG_USB_STORAGE=y
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_HCTOSYS is not set
 CONFIG_DMADEVICES=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_INPUT=y
 CONFIG_EEEPC_LAPTOP=y
 CONFIG_AMD_IOMMU=y
 CONFIG_INTEL_IOMMU=y
@@ -237,6 +256,7 @@ CONFIG_NFS_FS=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NFS_V4=y
 CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
 CONFIG_NLS_DEFAULT="utf8"
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=y
@@ -257,4 +277,3 @@ CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
 CONFIG_EARLY_PRINTK_DBGP=y
 CONFIG_DEBUG_BOOT_PARAMS=y
-CONFIG_KALLSYMS_ALL=y
index 7f3886eeb2ffaac3458fd9c67f0d21616762bc6b..eca5d6eff1322416488b7f739003914f05c75f33 100644 (file)
@@ -3,8 +3,7 @@ out := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
 # Create output directory if not already present
-_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \
-         $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')
+$(shell mkdir -p $(out) $(uapi))
 
 syscall32 := $(src)/syscall_32.tbl
 syscall64 := $(src)/syscall_64.tbl
index e88791b420eeb85fa143d831870829419c78323e..fc7f458eb3de6351ee25bee5e8b4ae5df8564245 100644 (file)
@@ -302,7 +302,7 @@ static struct extra_reg intel_spr_extra_regs[] __read_mostly = {
        INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
        INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
-       INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
+       INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
        INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
        INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
        EVENT_EXTRA_END
@@ -5536,7 +5536,11 @@ static void intel_pmu_check_event_constraints(struct event_constraint *event_con
                        /* Disabled fixed counters which are not in CPUID */
                        c->idxmsk64 &= intel_ctrl;
 
-                       if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
+                       /*
+                        * Don't extend the pseudo-encoding to the
+                        * generic counters
+                        */
+                       if (!use_fixed_pseudo_encoding(c->code))
                                c->idxmsk64 |= (1ULL << num_counters) - 1;
                }
                c->idxmsk64 &=
@@ -6212,6 +6216,7 @@ __init int intel_pmu_init(void)
 
        case INTEL_FAM6_ALDERLAKE:
        case INTEL_FAM6_ALDERLAKE_L:
+       case INTEL_FAM6_RAPTORLAKE:
                /*
                 * Alder Lake has 2 types of CPU, core and atom.
                 *
index c6262b154c3a292ea1b985193c4cd4b79640ad6c..5d7762288a243f6b8cb9db4c329d581331566074 100644 (file)
@@ -40,7 +40,7 @@
  * Model specific counters:
  *     MSR_CORE_C1_RES: CORE C1 Residency Counter
  *                      perf code: 0x00
- *                      Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL
+ *                      Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL
  *                      Scope: Core (each processor core has a MSR)
  *     MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
  *                            perf code: 0x01
  *                            perf code: 0x02
  *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
  *                                             SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- *                                             TGL,TNT,RKL,ADL
+ *                                             TGL,TNT,RKL,ADL,RPL
  *                            Scope: Core
  *     MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
  *                            perf code: 0x03
  *                            Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
- *                                             ICL,TGL,RKL,ADL
+ *                                             ICL,TGL,RKL,ADL,RPL
  *                            Scope: Core
  *     MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
  *                            perf code: 0x00
  *                            Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
- *                                             KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL
+ *                                             KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
+ *                                             RPL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
  *                            perf code: 0x01
  *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
  *                                             GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL,
- *                                             ADL
+ *                                             ADL,RPL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
  *                            perf code: 0x02
  *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
  *                                             SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- *                                             TGL,TNT,RKL,ADL
+ *                                             TGL,TNT,RKL,ADL,RPL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
  *                            perf code: 0x03
  *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
- *                                             KBL,CML,ICL,TGL,RKL,ADL
+ *                                             KBL,CML,ICL,TGL,RKL,ADL,RPL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
  *                            perf code: 0x04
  *                            Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
- *                                             ADL
+ *                                             ADL,RPL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
  *                            perf code: 0x05
  *                            Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
- *                                             ADL
+ *                                             ADL,RPL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
  *                            perf code: 0x06
  *                            Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
- *                                             TNT,RKL,ADL
+ *                                             TNT,RKL,ADL,RPL
  *                            Scope: Package (physical package)
  *
  */
@@ -680,6 +681,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,          &icl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,           &adl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,         &adl_cstates),
+       X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,          &adl_cstates),
        { },
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
index e497da9bf42707b654dc00d94ad5c998927918b2..7695dcae280e7067db8658827398489efbc04e54 100644 (file)
@@ -1828,6 +1828,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,          &rkl_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,           &adl_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,         &adl_uncore_init),
+       X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,          &adl_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    &spr_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,      &snr_uncore_init),
        {},
index f698a55bde8189884793e40481f060e132eef42b..4262351f52b60b5fd0a0f1bbbf2828c61f33c76d 100644 (file)
 #define PCI_DEVICE_ID_INTEL_ADL_14_IMC         0x4650
 #define PCI_DEVICE_ID_INTEL_ADL_15_IMC         0x4668
 #define PCI_DEVICE_ID_INTEL_ADL_16_IMC         0x4670
+#define PCI_DEVICE_ID_INTEL_RPL_1_IMC          0xA700
+#define PCI_DEVICE_ID_INTEL_RPL_2_IMC          0xA702
+#define PCI_DEVICE_ID_INTEL_RPL_3_IMC          0xA706
+#define PCI_DEVICE_ID_INTEL_RPL_4_IMC          0xA709
 
 /* SNB event control */
 #define SNB_UNC_CTL_EV_SEL_MASK                        0x000000ff
@@ -1406,6 +1410,22 @@ static const struct pci_device_id tgl_uncore_pci_ids[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_16_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RPL_1_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RPL_2_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RPL_3_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RPL_4_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
        { /* end: all zeroes */ }
 };
 
index 96c775abe31ff7ebbf0fd39b1c6e65395b78af96..6d759f88315c6ba9bb3bdead7611196a607f43c7 100644 (file)
@@ -103,6 +103,7 @@ static bool test_intel(int idx, void *data)
        case INTEL_FAM6_ROCKETLAKE:
        case INTEL_FAM6_ALDERLAKE:
        case INTEL_FAM6_ALDERLAKE_L:
+       case INTEL_FAM6_RAPTORLAKE:
                if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
                        return true;
                break;
index c878fed3056fd809ffb2c541acb04e44e0bfe853..fbcfec4dc4ccd74d983d84ebad2c6b4d188bde1a 100644 (file)
 
 # define DEFINE_EXTABLE_TYPE_REG \
        ".macro extable_type_reg type:req reg:req\n"                                            \
-       ".set found, 0\n"                                                                       \
-       ".set regnr, 0\n"                                                                       \
+       ".set .Lfound, 0\n"                                                                     \
+       ".set .Lregnr, 0\n"                                                                     \
        ".irp rs,rax,rcx,rdx,rbx,rsp,rbp,rsi,rdi,r8,r9,r10,r11,r12,r13,r14,r15\n"               \
        ".ifc \\reg, %%\\rs\n"                                                                  \
-       ".set found, found+1\n"                                                                 \
-       ".long \\type + (regnr << 8)\n"                                                         \
+       ".set .Lfound, .Lfound+1\n"                                                             \
+       ".long \\type + (.Lregnr << 8)\n"                                                       \
        ".endif\n"                                                                              \
-       ".set regnr, regnr+1\n"                                                                 \
+       ".set .Lregnr, .Lregnr+1\n"                                                             \
        ".endr\n"                                                                               \
-       ".set regnr, 0\n"                                                                       \
+       ".set .Lregnr, 0\n"                                                                     \
        ".irp rs,eax,ecx,edx,ebx,esp,ebp,esi,edi,r8d,r9d,r10d,r11d,r12d,r13d,r14d,r15d\n"       \
        ".ifc \\reg, %%\\rs\n"                                                                  \
-       ".set found, found+1\n"                                                                 \
-       ".long \\type + (regnr << 8)\n"                                                         \
+       ".set .Lfound, .Lfound+1\n"                                                             \
+       ".long \\type + (.Lregnr << 8)\n"                                                       \
        ".endif\n"                                                                              \
-       ".set regnr, regnr+1\n"                                                                 \
+       ".set .Lregnr, .Lregnr+1\n"                                                             \
        ".endr\n"                                                                               \
-       ".if (found != 1)\n"                                                                    \
+       ".if (.Lfound != 1)\n"                                                                  \
        ".error \"extable_type_reg: bad register argument\"\n"                                  \
        ".endif\n"                                                                              \
        ".endm\n"
index 4d20a293c6fd420952f7897840b06cafe71a1d0c..aaf0cb0db4aecfebc103498e1f2f8106f5b52733 100644 (file)
@@ -78,9 +78,9 @@ do {                                                          \
  */
 #define __WARN_FLAGS(flags)                                    \
 do {                                                           \
-       __auto_type f = BUGFLAG_WARNING|(flags);                \
+       __auto_type __flags = BUGFLAG_WARNING|(flags);          \
        instrumentation_begin();                                \
-       _BUG_FLAGS(ASM_UD2, f, ASM_REACHABLE);                  \
+       _BUG_FLAGS(ASM_UD2, __flags, ASM_REACHABLE);            \
        instrumentation_end();                                  \
 } while (0)
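
Renaming the macro-local from f to __flags is a macro-hygiene fix: the (flags) argument is expanded inside the scope of the declaration, so a caller whose flags expression mentioned its own variable f would have it captured by the freshly declared, still-uninitialized local. A hypothetical userspace demonstration of the hazard (macro names made up):

#include <stdio.h>

/* Old style: the macro-local 'f' shadows any 'f' in the argument, so the
 * initializer reads its own uninitialized self (undefined behavior;
 * gcc -Wuninitialized can flag it). */
#define WARN_OLD(flags) \
        do { int f = 0x100 | (flags); printf("%#x\n", f); } while (0)

/* New style: a reserved-looking local name avoids the capture. */
#define WARN_NEW(flags) \
        do { int __flags = 0x100 | (flags); printf("%#x\n", __flags); } while (0)

int main(void)
{
        int f = 0x1;

        WARN_NEW(f << 1);   /* prints 0x102 as intended */
        WARN_OLD(f << 1);   /* 'f' here is the macro's own, uninitialized */
        return 0;
}
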
 
index 7516e4199b3c61cc90295a7506b52dff62183743..20fd0acd7d800b58a8b95b2ff807899af94cfae1 100644 (file)
@@ -28,15 +28,13 @@ typedef u16         compat_ipc_pid_t;
 typedef __kernel_fsid_t        compat_fsid_t;
 
 struct compat_stat {
-       compat_dev_t    st_dev;
-       u16             __pad1;
+       u32             st_dev;
        compat_ino_t    st_ino;
        compat_mode_t   st_mode;
        compat_nlink_t  st_nlink;
        __compat_uid_t  st_uid;
        __compat_gid_t  st_gid;
-       compat_dev_t    st_rdev;
-       u16             __pad2;
+       u32             st_rdev;
        u32             st_size;
        u32             st_blksize;
        u32             st_blocks;
index f6d91ecb80267840e8f3b17e10986a2af28ef6ec..e9736af126b29ee2083b6900e8354f820d5b02e8 100644 (file)
@@ -210,8 +210,6 @@ void __iomem *ioremap(resource_size_t offset, unsigned long size);
 extern void iounmap(volatile void __iomem *addr);
 #define iounmap iounmap
 
-extern void set_iounmap_nonlazy(void);
-
 #ifdef __KERNEL__
 
 void memcpy_fromio(void *, const volatile void __iomem *, size_t);
index 4138939532c6eb2530fe6b4c387a7d6a81b79179..e0c0f0e1f754c11fa44374a9e4536a6cbcaeb630 100644 (file)
@@ -249,6 +249,7 @@ enum x86_intercept_stage;
 #define PFERR_SGX_BIT 15
 #define PFERR_GUEST_FINAL_BIT 32
 #define PFERR_GUEST_PAGE_BIT 33
+#define PFERR_IMPLICIT_ACCESS_BIT 48
 
 #define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
 #define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
@@ -259,6 +260,7 @@ enum x86_intercept_stage;
 #define PFERR_SGX_MASK (1U << PFERR_SGX_BIT)
 #define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
 #define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)
+#define PFERR_IMPLICIT_ACCESS (1ULL << PFERR_IMPLICIT_ACCESS_BIT)
 
 #define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |       \
                                 PFERR_WRITE_MASK |             \
@@ -430,7 +432,7 @@ struct kvm_mmu {
        void (*inject_page_fault)(struct kvm_vcpu *vcpu,
                                  struct x86_exception *fault);
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-                           gpa_t gva_or_gpa, u32 access,
+                           gpa_t gva_or_gpa, u64 access,
                            struct x86_exception *exception);
        int (*sync_page)(struct kvm_vcpu *vcpu,
                         struct kvm_mmu_page *sp);
@@ -512,6 +514,7 @@ struct kvm_pmu {
        u64 global_ctrl_mask;
        u64 global_ovf_ctrl_mask;
        u64 reserved_bits;
+       u64 raw_event_mask;
        u8 version;
        struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
        struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
@@ -971,12 +974,10 @@ enum hv_tsc_page_status {
        HV_TSC_PAGE_UNSET = 0,
        /* TSC page MSR was written by the guest, update pending */
        HV_TSC_PAGE_GUEST_CHANGED,
-       /* TSC page MSR was written by KVM userspace, update pending */
+       /* TSC page update was triggered from the host side */
        HV_TSC_PAGE_HOST_CHANGED,
        /* TSC page was properly set up and is currently active  */
        HV_TSC_PAGE_SET,
-       /* TSC page is currently being updated and therefore is inactive */
-       HV_TSC_PAGE_UPDATING,
        /* TSC page was set up with an inaccessible GPA */
        HV_TSC_PAGE_BROKEN,
 };
@@ -1040,14 +1041,17 @@ struct kvm_x86_msr_filter {
        struct msr_bitmap_range ranges[16];
 };
 
-#define APICV_INHIBIT_REASON_DISABLE    0
-#define APICV_INHIBIT_REASON_HYPERV     1
-#define APICV_INHIBIT_REASON_NESTED     2
-#define APICV_INHIBIT_REASON_IRQWIN     3
-#define APICV_INHIBIT_REASON_PIT_REINJ  4
-#define APICV_INHIBIT_REASON_X2APIC    5
-#define APICV_INHIBIT_REASON_BLOCKIRQ  6
-#define APICV_INHIBIT_REASON_ABSENT    7
+enum kvm_apicv_inhibit {
+       APICV_INHIBIT_REASON_DISABLE,
+       APICV_INHIBIT_REASON_HYPERV,
+       APICV_INHIBIT_REASON_NESTED,
+       APICV_INHIBIT_REASON_IRQWIN,
+       APICV_INHIBIT_REASON_PIT_REINJ,
+       APICV_INHIBIT_REASON_X2APIC,
+       APICV_INHIBIT_REASON_BLOCKIRQ,
+       APICV_INHIBIT_REASON_ABSENT,
+       APICV_INHIBIT_REASON_SEV,
+};
 
 struct kvm_arch {
        unsigned long n_used_mmu_pages;
@@ -1401,7 +1405,7 @@ struct kvm_x86_ops {
        void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
        void (*enable_irq_window)(struct kvm_vcpu *vcpu);
        void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
-       bool (*check_apicv_inhibit_reasons)(ulong bit);
+       bool (*check_apicv_inhibit_reasons)(enum kvm_apicv_inhibit reason);
        void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
        void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
        void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
@@ -1580,12 +1584,13 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
 #define kvm_arch_pmi_in_guest(vcpu) \
        ((vcpu) && (vcpu)->arch.handling_intr_from_guest)
 
-int kvm_mmu_module_init(void);
-void kvm_mmu_module_exit(void);
+void kvm_mmu_x86_module_init(void);
+int kvm_mmu_vendor_module_init(void);
+void kvm_mmu_vendor_module_exit(void);
 
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
-void kvm_mmu_init_vm(struct kvm *kvm);
+int kvm_mmu_init_vm(struct kvm *kvm);
 void kvm_mmu_uninit_vm(struct kvm *kvm);
 
 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
@@ -1795,11 +1800,22 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
 
 bool kvm_apicv_activated(struct kvm *kvm);
 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
-void kvm_request_apicv_update(struct kvm *kvm, bool activate,
-                             unsigned long bit);
+void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
+                                     enum kvm_apicv_inhibit reason, bool set);
+void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
+                                   enum kvm_apicv_inhibit reason, bool set);
+
+static inline void kvm_set_apicv_inhibit(struct kvm *kvm,
+                                        enum kvm_apicv_inhibit reason)
+{
+       kvm_set_or_clear_apicv_inhibit(kvm, reason, true);
+}
 
-void __kvm_request_apicv_update(struct kvm *kvm, bool activate,
-                               unsigned long bit);
+static inline void kvm_clear_apicv_inhibit(struct kvm *kvm,
+                                          enum kvm_apicv_inhibit reason)
+{
+       kvm_set_or_clear_apicv_inhibit(kvm, reason, false);
+}
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
index b85147d75626e366422b7ed9d290e19ff19c3a4e..d71c7e8b738d2a309335f61b236079721568d85c 100644 (file)
@@ -12,14 +12,17 @@ int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
 /* Structs and defines for the X86 specific MSI message format */
 
 typedef struct x86_msi_data {
-       u32     vector                  :  8,
-               delivery_mode           :  3,
-               dest_mode_logical       :  1,
-               reserved                :  2,
-               active_low              :  1,
-               is_level                :  1;
-
-       u32     dmar_subhandle;
+       union {
+               struct {
+                       u32     vector                  :  8,
+                               delivery_mode           :  3,
+                               dest_mode_logical       :  1,
+                               reserved                :  2,
+                               active_low              :  1,
+                               is_level                :  1;
+               };
+               u32     dmar_subhandle;
+       };
 } __attribute__ ((packed)) arch_msi_msg_data_t;
 #define arch_msi_msg_data      x86_msi_data
 
index 0eb90d21049e84a287f20b5abaf83ba359e0c824..ee15311b6be1d99e2bea11bd4c03a8a36fd8c706 100644 (file)
 #define TSX_CTRL_RTM_DISABLE           BIT(0)  /* Disable RTM feature */
 #define TSX_CTRL_CPUID_CLEAR           BIT(1)  /* Disable TSX enumeration */
 
-/* SRBDS support */
 #define MSR_IA32_MCU_OPT_CTRL          0x00000123
-#define RNGDS_MITG_DIS                 BIT(0)
+#define RNGDS_MITG_DIS                 BIT(0)  /* SRBDS support */
+#define RTM_ALLOW                      BIT(1)  /* TSX development mode */
 
 #define MSR_IA32_SYSENTER_CS           0x00000174
 #define MSR_IA32_SYSENTER_ESP          0x00000175
index a3c33b79fb8659212f637055658dad544ecbdbcb..13c0d63ed55e428226ecd004dd078ed9d412eb55 100644 (file)
@@ -38,9 +38,9 @@
 #define arch_raw_cpu_ptr(ptr)                          \
 ({                                                     \
        unsigned long tcp_ptr__;                        \
-       asm volatile("add " __percpu_arg(1) ", %0"      \
-                    : "=r" (tcp_ptr__)                 \
-                    : "m" (this_cpu_off), "0" (ptr));  \
+       asm ("add " __percpu_arg(1) ", %0"              \
+            : "=r" (tcp_ptr__)                         \
+            : "m" (this_cpu_off), "0" (ptr));          \
        (typeof(*(ptr)) __kernel __force *)tcp_ptr__;   \
 })
 #else
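
Dropping volatile lets the compiler treat the per-CPU pointer adjustment as a pure computation, so identical evaluations can be merged or hoisted; a volatile asm must be re-emitted at every use even though this one has no effect beyond its output. A rough standalone illustration of the same pattern:

#include <stdio.h>

/* A non-volatile asm with outputs and no clobbers is pure as far as GCC
 * is concerned, so the two calls below may be merged into one 'add';
 * marking the asm volatile would force both to be emitted. */
static inline unsigned long adjust(unsigned long ptr, unsigned long off)
{
        unsigned long ret;

        asm ("add %1, %0"
             : "=r" (ret)
             : "r" (off), "0" (ptr));
        return ret;
}

unsigned long twice(unsigned long ptr, unsigned long off)
{
        /* With the pure asm, GCC is free to compute adjust() once here. */
        return adjust(ptr, off) + adjust(ptr, off);
}

int main(void)
{
        printf("%lu\n", twice(100, 4));   /* prints 208 */
        return 0;
}
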
index 58d9e4b1fa0add445ef39ff4032fc4ae5bde6a5c..b06e4c573adddad0d734aeb72cff9afc1744285b 100644 (file)
@@ -241,6 +241,11 @@ struct x86_pmu_capability {
 #define INTEL_PMC_IDX_FIXED_SLOTS      (INTEL_PMC_IDX_FIXED + 3)
 #define INTEL_PMC_MSK_FIXED_SLOTS      (1ULL << INTEL_PMC_IDX_FIXED_SLOTS)
 
+static inline bool use_fixed_pseudo_encoding(u64 code)
+{
+       return !(code & 0xff);
+}
+
 /*
  * We model BTS tracing as another fixed-mode PMC.
  *
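
Fixed-counter events are requested through pseudo-encodings whose event-select byte (bits 0-7) is zero, for example 0x0300 for ref-cycles, whereas real generic-counter encodings always carry a non-zero event byte; the helper just tests that byte, which the constraint code in the earlier hunk uses to avoid extending a pseudo-encoding's mask to the generic counters. A userspace sketch (the event values are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the helper added above: pseudo-encodings for fixed counters
 * have an all-zero event-select byte. */
static bool use_fixed_pseudo_encoding(uint64_t code)
{
        return !(code & 0xff);
}

int main(void)
{
        printf("%d\n", use_fixed_pseudo_encoding(0x003c));   /* 0: real encoding */
        printf("%d\n", use_fixed_pseudo_encoding(0x0300));   /* 1: ref-cycles pseudo-encoding */
        return 0;
}
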
index ed4f8bb6c2d9c293f2ca39c9917669afcef83f77..2455d721503ece7a6f1f5a24edd8f20c18629272 100644 (file)
@@ -38,6 +38,8 @@
 #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)                       \
        __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; int3; nop; nop; nop")
 
+#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)                       \
+       ARCH_DEFINE_STATIC_CALL_TRAMP(name, __static_call_return0)
 
 #define ARCH_ADD_TRAMP_KEY(name)                                       \
        asm(".pushsection .static_call_tramp_key, \"a\"         \n"     \
index 7eb2df5417fb0ea60777df6ce8a7e0901beb1e5a..f70a5108d46421d7d5887526790eead68b8fa921 100644 (file)
@@ -221,8 +221,14 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define SVM_NESTED_CTL_SEV_ES_ENABLE   BIT(2)
 
 
+#define SVM_TSC_RATIO_RSVD     0xffffff0000000000ULL
+#define SVM_TSC_RATIO_MIN      0x0000000000000001ULL
+#define SVM_TSC_RATIO_MAX      0x000000ffffffffffULL
+#define SVM_TSC_RATIO_DEFAULT  0x0100000000ULL
+
+
 /* AVIC */
-#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK   (0xFF)
+#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK   (0xFFULL)
 #define AVIC_LOGICAL_ID_ENTRY_VALID_BIT                        31
 #define AVIC_LOGICAL_ID_ENTRY_VALID_MASK               (1 << 31)
 
@@ -230,9 +236,11 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK       (0xFFFFFFFFFFULL << 12)
 #define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK         (1ULL << 62)
 #define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK              (1ULL << 63)
-#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK               (0xFF)
+#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK               (0xFFULL)
+
+#define AVIC_DOORBELL_PHYSICAL_ID_MASK                 GENMASK_ULL(11, 0)
 
-#define AVIC_DOORBELL_PHYSICAL_ID_MASK                 (0xFF)
+#define VMCB_AVIC_APIC_BAR_MASK                                0xFFFFFFFFFF000ULL
 
 #define AVIC_UNACCEL_ACCESS_WRITE_MASK         1
 #define AVIC_UNACCEL_ACCESS_OFFSET_MASK                0xFF0
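
The ULL suffixes guard against a classic C trap: a plain 0xFF constant has type int, so shifting it toward the high bits of a 64-bit field happens in 32-bit arithmetic first, with signed overflow and sign extension as the result. A small demonstration with made-up shift amounts:

#include <stdio.h>

#define MASK_INT 0xFF      /* type int */
#define MASK_ULL 0xFFULL   /* 64-bit from the start */

int main(void)
{
        /* Shifting the int constant high overflows 32-bit signed math
         * (formally undefined); the usual result then sign-extends when
         * widened, unlike the ULL constant. */
        printf("%#llx\n", (unsigned long long)(MASK_INT << 24));
        printf("%#llx\n", (unsigned long long)(MASK_ULL << 24));   /* 0xff000000 */
        return 0;
}
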
index d72c3d66e94f7afcea6c97fb35532bbf5e232d26..8963915e533f269e31489e7c86881c4f3ebeac0e 100644 (file)
@@ -124,9 +124,5 @@ struct user{
   char u_comm[32];             /* User command that was responsible */
   int u_debugreg[8];
 };
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
 
 #endif /* _ASM_X86_USER_32_H */
index db909923611c250e44eb53fa960ed8ed0a84570b..1dd10f07ccd627d584bbcc36ae7e0f96057d6eac 100644 (file)
@@ -130,9 +130,5 @@ struct user {
   unsigned long error_code; /* CPU error code or 0 */
   unsigned long fault_address; /* CR3 or 0 */
 };
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
 
 #endif /* _ASM_X86_USER_64_H */
index ed4417500700416768b6096a905e3e5d090b4d98..e342ae4db3c4de5e88456b72e727cc0def5e6978 100644 (file)
@@ -1855,6 +1855,8 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
        validate_apic_and_package_id(c);
        x86_spec_ctrl_setup_ap();
        update_srbds_msr();
+
+       tsx_ap_init();
 }
 
 static __init int setup_noclflush(char *arg)
index ee6f23f7587d402cc6c1f4b0e6b7358dcb78a7f6..2a8e584fc991384813cab20de6b1746fbe8f1e38 100644 (file)
@@ -55,11 +55,10 @@ enum tsx_ctrl_states {
 extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
 
 extern void __init tsx_init(void);
-extern void tsx_enable(void);
-extern void tsx_disable(void);
-extern void tsx_clear_cpuid(void);
+void tsx_ap_init(void);
 #else
 static inline void tsx_init(void) { }
+static inline void tsx_ap_init(void) { }
 #endif /* CONFIG_CPU_SUP_INTEL */
 
 extern void get_cpu_cap(struct cpuinfo_x86 *c);
index 8321c43554a1d2e2cb615b4f9d6af463eb0f082f..f7a5370a9b3b83d4ed129e44879d774d6a8be6e4 100644 (file)
@@ -717,13 +717,6 @@ static void init_intel(struct cpuinfo_x86 *c)
 
        init_intel_misc_features(c);
 
-       if (tsx_ctrl_state == TSX_CTRL_ENABLE)
-               tsx_enable();
-       else if (tsx_ctrl_state == TSX_CTRL_DISABLE)
-               tsx_disable();
-       else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT)
-               tsx_clear_cpuid();
-
        split_lock_init();
        bus_lock_init();
 
index 9c7a5f049292966362384cb81e396630ff3e7a92..ec7bbac3a9f29aa4f34b5d9ba0de4d9c58381cc0 100644 (file)
@@ -19,7 +19,7 @@
 
 enum tsx_ctrl_states tsx_ctrl_state __ro_after_init = TSX_CTRL_NOT_SUPPORTED;
 
-void tsx_disable(void)
+static void tsx_disable(void)
 {
        u64 tsx;
 
@@ -39,7 +39,7 @@ void tsx_disable(void)
        wrmsrl(MSR_IA32_TSX_CTRL, tsx);
 }
 
-void tsx_enable(void)
+static void tsx_enable(void)
 {
        u64 tsx;
 
@@ -58,7 +58,7 @@ void tsx_enable(void)
        wrmsrl(MSR_IA32_TSX_CTRL, tsx);
 }
 
-static bool __init tsx_ctrl_is_supported(void)
+static bool tsx_ctrl_is_supported(void)
 {
        u64 ia32_cap = x86_read_arch_cap_msr();
 
@@ -84,7 +84,45 @@ static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
        return TSX_CTRL_ENABLE;
 }
 
-void tsx_clear_cpuid(void)
+/*
+ * Disabling TSX is not a trivial business.
+ *
+ * First of all, there's a CPUID bit: X86_FEATURE_RTM_ALWAYS_ABORT,
+ * which says that TSX is practically disabled (all transactions are
+ * aborted by default). When that bit is set, the kernel unconditionally
+ * disables TSX.
+ *
+ * In order to do that, however, it needs to dance a bit:
+ *
+ * 1. The first method to disable it is through MSR_TSX_FORCE_ABORT and
+ * the MSR is present only when *two* CPUID bits are set:
+ *
+ * - X86_FEATURE_RTM_ALWAYS_ABORT
+ * - X86_FEATURE_TSX_FORCE_ABORT
+ *
+ * 2. The second method is for CPUs which do not have the above-mentioned
+ * MSR: those use a different MSR, MSR_IA32_TSX_CTRL, and disable TSX
+ * through that one. Those CPUs can also have the initially mentioned
+ * CPUID bit X86_FEATURE_RTM_ALWAYS_ABORT set, and for those the same strategy
+ * applies: TSX gets disabled unconditionally.
+ *
+ * When either of the two methods is present, the kernel disables TSX and
+ * clears the respective RTM and HLE feature flags.
+ *
+ * An additional twist in the whole thing is late microcode loading,
+ * which, when done, may cause the X86_FEATURE_RTM_ALWAYS_ABORT CPUID
+ * bit to become set only after the update.
+ *
+ * A subsequent hotplug operation on any logical CPU except the BSP will
+ * cause the supported CPUID feature bits to be re-detected and, if RTM
+ * and HLE suddenly get cleared even though userspace consulted them
+ * before the update, funny explosions will happen. Long story short:
+ * the kernel doesn't modify CPUID feature bits after booting.
+ *
+ * That's why this function's call in init_intel() doesn't clear the
+ * feature flags.
+ */
+static void tsx_clear_cpuid(void)
 {
        u64 msr;
 
@@ -97,6 +135,39 @@ void tsx_clear_cpuid(void)
                rdmsrl(MSR_TSX_FORCE_ABORT, msr);
                msr |= MSR_TFA_TSX_CPUID_CLEAR;
                wrmsrl(MSR_TSX_FORCE_ABORT, msr);
+       } else if (tsx_ctrl_is_supported()) {
+               rdmsrl(MSR_IA32_TSX_CTRL, msr);
+               msr |= TSX_CTRL_CPUID_CLEAR;
+               wrmsrl(MSR_IA32_TSX_CTRL, msr);
+       }
+}
+
+/*
+ * Disable TSX development mode
+ *
+ * When the microcode released in Feb 2022 is applied, TSX will be disabled by
+ * default on some processors. MSR 0x122 (TSX_CTRL) and MSR 0x123
+ * (IA32_MCU_OPT_CTRL) can be used to re-enable TSX for development; doing so is
+ * not recommended for production deployments. In particular, applying MD_CLEAR
+ * flows for mitigation of the Intel TSX Asynchronous Abort (TAA) transient
+ * execution attack may not be effective on these processors when Intel TSX is
+ * enabled with updated microcode.
+ */
+static void tsx_dev_mode_disable(void)
+{
+       u64 mcu_opt_ctrl;
+
+       /* Check if RTM_ALLOW exists */
+       if (!boot_cpu_has_bug(X86_BUG_TAA) || !tsx_ctrl_is_supported() ||
+           !cpu_feature_enabled(X86_FEATURE_SRBDS_CTRL))
+               return;
+
+       rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);
+
+       if (mcu_opt_ctrl & RTM_ALLOW) {
+               mcu_opt_ctrl &= ~RTM_ALLOW;
+               wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);
+               setup_force_cpu_cap(X86_FEATURE_RTM_ALWAYS_ABORT);
        }
 }
 
@@ -105,14 +176,14 @@ void __init tsx_init(void)
        char arg[5] = {};
        int ret;
 
+       tsx_dev_mode_disable();
+
        /*
-        * Hardware will always abort a TSX transaction if both CPUID bits
-        * RTM_ALWAYS_ABORT and TSX_FORCE_ABORT are set. In this case, it is
-        * better not to enumerate CPUID.RTM and CPUID.HLE bits. Clear them
-        * here.
+        * Hardware will always abort a TSX transaction when the CPUID bit
+        * RTM_ALWAYS_ABORT is set. In this case, it is better not to enumerate
+        * CPUID.RTM and CPUID.HLE bits. Clear them here.
         */
-       if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) &&
-           boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+       if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
                tsx_ctrl_state = TSX_CTRL_RTM_ALWAYS_ABORT;
                tsx_clear_cpuid();
                setup_clear_cpu_cap(X86_FEATURE_RTM);
@@ -175,3 +246,16 @@ void __init tsx_init(void)
                setup_force_cpu_cap(X86_FEATURE_HLE);
        }
 }
+
+void tsx_ap_init(void)
+{
+       tsx_dev_mode_disable();
+
+       if (tsx_ctrl_state == TSX_CTRL_ENABLE)
+               tsx_enable();
+       else if (tsx_ctrl_state == TSX_CTRL_DISABLE)
+               tsx_disable();
+       else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT)
+               /* See the comment above that function for more details. */
+               tsx_clear_cpuid();
+}
index a7f617a3981d451c002317ac9bb64ab3baeaaa27..97529552dd2496421cd7f684c38e7a3629ad40c5 100644 (file)
@@ -37,7 +37,6 @@ static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
        } else
                memcpy(buf, vaddr + offset, csize);
 
-       set_iounmap_nonlazy();
        iounmap((void __iomem *)vaddr);
        return csize;
 }
index 19821f027cb39e0d73fd3e7c17bc4977c2b7d875..c049561f373a0c6b1f7a7aa8a8346f0f898c1304 100644 (file)
@@ -415,9 +415,6 @@ int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf,
                xpkru = get_xsave_addr(&kstate->regs.xsave, XFEATURE_PKRU);
                *vpkru = xpkru->pkru;
        }
-
-       /* Ensure that XCOMP_BV is set up for XSAVES */
-       xstate_init_xcomp_bv(&kstate->regs.xsave, kstate->xfeatures);
        return 0;
 }
 EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate);
index 7c7824ae78622d7d201312d0291e63895d7838f1..39e1c8626ab999fea588e7558927724dd2f3b457 100644 (file)
@@ -81,10 +81,10 @@ static unsigned int xstate_offsets[XFEATURE_MAX] __ro_after_init =
        { [ 0 ... XFEATURE_MAX - 1] = -1};
 static unsigned int xstate_sizes[XFEATURE_MAX] __ro_after_init =
        { [ 0 ... XFEATURE_MAX - 1] = -1};
-static unsigned int xstate_comp_offsets[XFEATURE_MAX] __ro_after_init =
-       { [ 0 ... XFEATURE_MAX - 1] = -1};
-static unsigned int xstate_supervisor_only_offsets[XFEATURE_MAX] __ro_after_init =
-       { [ 0 ... XFEATURE_MAX - 1] = -1};
+static unsigned int xstate_flags[XFEATURE_MAX] __ro_after_init;
+
+#define XSTATE_FLAG_SUPERVISOR BIT(0)
+#define XSTATE_FLAG_ALIGNED64  BIT(1)
 
 /*
  * Return whether the system supports a given xfeature.
@@ -124,17 +124,41 @@ int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
 }
 EXPORT_SYMBOL_GPL(cpu_has_xfeatures);
 
+static bool xfeature_is_aligned64(int xfeature_nr)
+{
+       return xstate_flags[xfeature_nr] & XSTATE_FLAG_ALIGNED64;
+}
+
 static bool xfeature_is_supervisor(int xfeature_nr)
 {
+       return xstate_flags[xfeature_nr] & XSTATE_FLAG_SUPERVISOR;
+}
+
+static unsigned int xfeature_get_offset(u64 xcomp_bv, int xfeature)
+{
+       unsigned int offs, i;
+
        /*
-        * Extended State Enumeration Sub-leaves (EAX = 0DH, ECX = n, n > 1)
-        * returns ECX[0] set to (1) for a supervisor state, and cleared (0)
-        * for a user state.
+        * Non-compacted format and legacy features use the cached fixed
+        * offsets.
         */
-       u32 eax, ebx, ecx, edx;
+       if (!cpu_feature_enabled(X86_FEATURE_XSAVES) || xfeature <= XFEATURE_SSE)
+               return xstate_offsets[xfeature];
 
-       cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
-       return ecx & 1;
+       /*
+        * Compacted format offsets depend on the actual content of the
+        * compacted xsave area which is determined by the xcomp_bv header
+        * field.
+        */
+       offs = FXSAVE_SIZE + XSAVE_HDR_SIZE;
+       for_each_extended_xfeature(i, xcomp_bv) {
+               if (xfeature_is_aligned64(i))
+                       offs = ALIGN(offs, 64);
+               if (i == xfeature)
+                       break;
+               offs += xstate_sizes[i];
+       }
+       return offs;
 }
 
 /*
@@ -182,7 +206,7 @@ static bool xfeature_enabled(enum xfeature xfeature)
  * Record the offsets and sizes of various xstates contained
  * in the XSAVE state memory layout.
  */
-static void __init setup_xstate_features(void)
+static void __init setup_xstate_cache(void)
 {
        u32 eax, ebx, ecx, edx, i;
        /* start at the beginning of the "extended state" */
@@ -205,6 +229,7 @@ static void __init setup_xstate_features(void)
                cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
 
                xstate_sizes[i] = eax;
+               xstate_flags[i] = ecx;
 
                /*
                 * If an xfeature is supervisor state, the offset in EBX is
@@ -263,94 +288,6 @@ static void __init print_xstate_features(void)
        WARN_ON(nr >= XFEATURE_MAX);    \
 } while (0)
 
-/*
- * We could cache this like xstate_size[], but we only use
- * it here, so it would be a waste of space.
- */
-static int xfeature_is_aligned(int xfeature_nr)
-{
-       u32 eax, ebx, ecx, edx;
-
-       CHECK_XFEATURE(xfeature_nr);
-
-       if (!xfeature_enabled(xfeature_nr)) {
-               WARN_ONCE(1, "Checking alignment of disabled xfeature %d\n",
-                         xfeature_nr);
-               return 0;
-       }
-
-       cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
-       /*
-        * The value returned by ECX[1] indicates the alignment
-        * of state component 'i' when the compacted format
-        * of the extended region of an XSAVE area is used:
-        */
-       return !!(ecx & 2);
-}
-
-/*
- * This function sets up offsets and sizes of all extended states in
- * xsave area. This supports both standard format and compacted format
- * of the xsave area.
- */
-static void __init setup_xstate_comp_offsets(void)
-{
-       unsigned int next_offset;
-       int i;
-
-       /*
-        * The FP xstates and SSE xstates are legacy states. They are always
-        * in the fixed offsets in the xsave area in either compacted form
-        * or standard form.
-        */
-       xstate_comp_offsets[XFEATURE_FP] = 0;
-       xstate_comp_offsets[XFEATURE_SSE] = offsetof(struct fxregs_state,
-                                                    xmm_space);
-
-       if (!cpu_feature_enabled(X86_FEATURE_XSAVES)) {
-               for_each_extended_xfeature(i, fpu_kernel_cfg.max_features)
-                       xstate_comp_offsets[i] = xstate_offsets[i];
-               return;
-       }
-
-       next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;
-
-       for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
-               if (xfeature_is_aligned(i))
-                       next_offset = ALIGN(next_offset, 64);
-
-               xstate_comp_offsets[i] = next_offset;
-               next_offset += xstate_sizes[i];
-       }
-}
-
-/*
- * Setup offsets of a supervisor-state-only XSAVES buffer:
- *
- * The offsets stored in xstate_comp_offsets[] only work for one specific
- * value of the Requested Feature BitMap (RFBM).  In cases where a different
- * RFBM value is used, a different set of offsets is required.  This set of
- * offsets is for when RFBM=xfeatures_mask_supervisor().
- */
-static void __init setup_supervisor_only_offsets(void)
-{
-       unsigned int next_offset;
-       int i;
-
-       next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;
-
-       for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
-               if (!xfeature_is_supervisor(i))
-                       continue;
-
-               if (xfeature_is_aligned(i))
-                       next_offset = ALIGN(next_offset, 64);
-
-               xstate_supervisor_only_offsets[i] = next_offset;
-               next_offset += xstate_sizes[i];
-       }
-}
-
 /*
  * Print out xstate component offsets and sizes
  */
@@ -360,7 +297,8 @@ static void __init print_xstate_offset_size(void)
 
        for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
                pr_info("x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n",
-                        i, xstate_comp_offsets[i], i, xstate_sizes[i]);
+                       i, xfeature_get_offset(fpu_kernel_cfg.max_features, i),
+                       i, xstate_sizes[i]);
        }
 }
 
@@ -419,7 +357,6 @@ static void __init setup_init_fpu_buf(void)
        if (!boot_cpu_has(X86_FEATURE_XSAVE))
                return;
 
-       setup_xstate_features();
        print_xstate_features();
 
        xstate_init_xcomp_bv(&init_fpstate.regs.xsave, fpu_kernel_cfg.max_features);
@@ -448,25 +385,6 @@ static void __init setup_init_fpu_buf(void)
        fxsave(&init_fpstate.regs.fxsave);
 }
 
-static int xfeature_uncompacted_offset(int xfeature_nr)
-{
-       u32 eax, ebx, ecx, edx;
-
-       /*
-        * Only XSAVES supports supervisor states and it uses compacted
-        * format. Checking a supervisor state's uncompacted offset is
-        * an error.
-        */
-       if (XFEATURE_MASK_SUPERVISOR_ALL & BIT_ULL(xfeature_nr)) {
-               WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr);
-               return -1;
-       }
-
-       CHECK_XFEATURE(xfeature_nr);
-       cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
-       return ebx;
-}
-
 int xfeature_size(int xfeature_nr)
 {
        u32 eax, ebx, ecx, edx;
@@ -644,29 +562,15 @@ static bool __init check_xstate_against_struct(int nr)
 
 static unsigned int xstate_calculate_size(u64 xfeatures, bool compacted)
 {
-       unsigned int size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
-       int i;
+       unsigned int topmost = fls64(xfeatures) -  1;
+       unsigned int offset = xstate_offsets[topmost];
 
-       for_each_extended_xfeature(i, xfeatures) {
-               /* Align from the end of the previous feature */
-               if (xfeature_is_aligned(i))
-                       size = ALIGN(size, 64);
-               /*
-                * In compacted format the enabled features are packed,
-                * i.e. disabled features do not occupy space.
-                *
-                * In non-compacted format the offsets are fixed and
-                * disabled states still occupy space in the memory buffer.
-                */
-               if (!compacted)
-                       size = xfeature_uncompacted_offset(i);
-               /*
-                * Add the feature size even for non-compacted format
-                * to make the end result correct
-                */
-               size += xfeature_size(i);
-       }
-       return size;
+       if (topmost <= XFEATURE_SSE)
+               return sizeof(struct xregs_state);
+
+       if (compacted)
+               offset = xfeature_get_offset(xfeatures, topmost);
+       return offset + xstate_sizes[topmost];
 }
 
 /*
@@ -935,6 +839,10 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
 
        /* Enable xstate instructions to be able to continue with initialization: */
        fpu__init_cpu_xstate();
+
+       /* Cache size, offset and flags for initialization */
+       setup_xstate_cache();
+
        err = init_xstate_size();
        if (err)
                goto out_disable;
@@ -950,8 +858,6 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
                                  fpu_user_cfg.max_features);
 
        setup_init_fpu_buf();
-       setup_xstate_comp_offsets();
-       setup_supervisor_only_offsets();
 
        /*
         * Paranoia check whether something in the setup modified the
@@ -1006,13 +912,19 @@ void fpu__resume_cpu(void)
  */
 static void *__raw_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
 {
-       if (!xfeature_enabled(xfeature_nr)) {
-               WARN_ON_FPU(1);
+       u64 xcomp_bv = xsave->header.xcomp_bv;
+
+       if (WARN_ON_ONCE(!xfeature_enabled(xfeature_nr)))
                return NULL;
+
+       if (cpu_feature_enabled(X86_FEATURE_XSAVES)) {
+               if (WARN_ON_ONCE(!(xcomp_bv & BIT_ULL(xfeature_nr))))
+                       return NULL;
        }
 
-       return (void *)xsave + xstate_comp_offsets[xfeature_nr];
+       return (void *)xsave + xfeature_get_offset(xcomp_bv, xfeature_nr);
 }
+
 /*
  * Given the xsave area and a state inside, this function returns the
  * address of the state.
@@ -1043,8 +955,9 @@ void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
         * We should not ever be requesting features that we
         * have not enabled.
         */
-       WARN_ONCE(!(fpu_kernel_cfg.max_features & BIT_ULL(xfeature_nr)),
-                 "get of unsupported state");
+       if (WARN_ON_ONCE(!xfeature_enabled(xfeature_nr)))
+               return NULL;
+
        /*
         * This assumes the last 'xsave*' instruction to
         * have requested that 'xfeature_nr' be saved.
@@ -1625,6 +1538,9 @@ static int __xstate_request_perm(u64 permitted, u64 requested, bool guest)
 
        /* Calculate the resulting kernel state size */
        mask = permitted | requested;
+       /* Take supervisor states into account on the host */
+       if (!guest)
+               mask |= xfeatures_mask_supervisor();
        ksize = xstate_calculate_size(mask, compacted);
 
        /* Calculate the resulting user state size */
@@ -1639,7 +1555,7 @@ static int __xstate_request_perm(u64 permitted, u64 requested, bool guest)
 
        perm = guest ? &fpu->guest_perm : &fpu->perm;
        /* Pairs with the READ_ONCE() in xstate_get_group_perm() */
-       WRITE_ONCE(perm->__state_perm, requested);
+       WRITE_ONCE(perm->__state_perm, mask);
        /* Protected by sighand lock */
        perm->__state_size = ksize;
        perm->__user_state_size = usize;
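
The new xfeature_get_offset() computes compacted-format offsets on demand instead of caching them per RFBM: it walks the features enabled in xcomp_bv, 64-byte-aligning components that request it, until it reaches the one asked for. A userspace sketch of that walk; the sizes and alignment flags below are invented for illustration, while the kernel caches the real values from CPUID leaf 0xD:

#include <stdint.h>
#include <stdio.h>

#define FXSAVE_SIZE    512
#define XSAVE_HDR_SIZE  64
#define ALIGNED64        1

static const struct { unsigned int size; unsigned int flags; } feat[] = {
        [2] = { 256, 0 },           /* hypothetical AVX-like state */
        [5] = {  64, ALIGNED64 },   /* hypothetical 64-byte-aligned state */
        [6] = { 512, ALIGNED64 },
};

static unsigned int compacted_offset(uint64_t xcomp_bv, int xfeature)
{
        unsigned int offs = FXSAVE_SIZE + XSAVE_HDR_SIZE;

        for (int i = 2; i <= xfeature; i++) {
                if (!(xcomp_bv & (1ULL << i)))
                        continue;   /* disabled features take no space */
                if (feat[i].flags & ALIGNED64)
                        offs = (offs + 63) & ~63u;
                if (i == xfeature)
                        break;
                offs += feat[i].size;
        }
        return offs;
}

int main(void)
{
        uint64_t xcomp_bv = (1ULL << 2) | (1ULL << 5) | (1ULL << 6);

        printf("feature 6 at %u\n", compacted_offset(xcomp_bv, 6));   /* 896 */
        return 0;
}
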
index 79e0b8d63ffa94c51c6f40d8eaf3f0511af20ff2..a22deb58f86d2e6d2f5709af95671be9c63c39c6 100644 (file)
@@ -517,7 +517,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
                } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
                        ipi_bitmap <<= min - apic_id;
                        min = apic_id;
-               } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
+               } else if (apic_id > min && apic_id < min + KVM_IPI_CLUSTER_SIZE) {
                        max = apic_id < max ? max : apic_id;
                } else {
                        ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
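
The added apic_id > min guard closes a hole in the sliding-window logic: an APIC ID below min that is too far away to extend the window downward (so the first branch fails) still satisfied apic_id < min + KVM_IPI_CLUSTER_SIZE, and the old code then shifted by the negative apic_id - min. A userspace sketch of just the window bookkeeping, with made-up IDs:

#include <stdio.h>

#define KVM_IPI_CLUSTER_SIZE 64

int main(void)
{
        unsigned int min = 100, max = 100;
        unsigned long long bitmap = 1ULL << (max - min);
        unsigned int id = 4;   /* far below min: the window cannot slide down */

        if (id < min && max - id < KVM_IPI_CLUSTER_SIZE) {
                bitmap <<= min - id;   /* slide the window down to id */
                min = id;
        } else if (id > min && id < min + KVM_IPI_CLUSTER_SIZE) {
                /* Without the 'id > min' guard, id == 4 would land here
                 * and shift by the negative amount id - min. */
                bitmap |= 1ULL << (id - min);
        } else {
                /* flush the current window, start a new one at id */
                min = max = id;
                bitmap = 1;
        }
        printf("min=%u max=%u bitmap=%#llx\n", min, max, bitmap);
        return 0;
}
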
index 531fb4cbb63fd1d1e29ef2cb80d4b27eb0b8ed0c..aa72cefdd5be61552e820f6ff65a949bf424fd97 100644 (file)
@@ -12,10 +12,9 @@ enum insn_type {
 };
 
 /*
- * data16 data16 xorq %rax, %rax - a single 5 byte instruction that clears %rax
- * The REX.W cancels the effect of any data16.
+ * cs cs cs xorl %eax, %eax - a single 5 byte instruction that clears %[er]ax
  */
-static const u8 xor5rax[] = { 0x66, 0x66, 0x48, 0x31, 0xc0 };
+static const u8 xor5rax[] = { 0x2e, 0x2e, 0x2e, 0x31, 0xc0 };
 
 static const u8 retinsn[] = { RET_INSN_OPCODE, 0xcc, 0xcc, 0xcc, 0xcc };
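
All static_call trampoline bodies, like the call rel32 they get patched over, are exactly 5 bytes; the new padding uses CS-segment prefixes (0x2e) in front of a 32-bit xorl %eax, %eax (31 c0), which zero-extends into the full %rax, so no REX.W is needed and no data16 interaction arises. A trivial check of the size invariant:

#include <stdio.h>

int main(void)
{
        /* A call rel32, the new xor5rax, and the ret;int3 padding are
         * all 5 bytes, so any of them can be patched over any other
         * in place. */
        const unsigned char call_rel32[] = { 0xe8, 0x00, 0x00, 0x00, 0x00 };
        const unsigned char xor5rax[]    = { 0x2e, 0x2e, 0x2e, 0x31, 0xc0 };
        const unsigned char ret_int3[]   = { 0xc3, 0xcc, 0xcc, 0xcc, 0xcc };

        printf("%zu %zu %zu\n", sizeof(call_rel32), sizeof(xor5rax),
               sizeof(ret_int3));
        return 0;
}
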
 
index a00cd97b2623bb39593ae34845cd1ab642ef261d..b24ca7f4ed7c8b6603d6d54ec846504c04f0c742 100644 (file)
@@ -735,6 +735,7 @@ static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
                        if (function > READ_ONCE(max_cpuid_80000000))
                                return entry;
                }
+               break;
 
        default:
                break;
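
The added break looks behavior-neutral, since falling through would only have reached the empty default arm, but implicit fallthrough is exactly what -Wimplicit-fallthrough builds flag. A minimal illustration of the construct (the bound below is made up):

#include <stdio.h>

static const char *classify(unsigned int function)
{
        switch (function) {
        case 0x80000000:
                if (function > 0x80000008)   /* illustrative bound */
                        return "out of range";
                break;   /* previously fell through into 'default' */
        default:
                break;
        }
        return "ok";
}

int main(void)
{
        printf("%s\n", classify(0x80000000));
        return 0;
}
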
index f9c00c89829b5bad6204a01528486d9c449d25e8..89b11e7dca8aa5d994122fdf7d1831bda5952df7 100644 (file)
@@ -3540,8 +3540,10 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt)
 {
        u64 tsc_aux = 0;
 
-       if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
+       if (!ctxt->ops->guest_has_rdpid(ctxt))
                return emulate_ud(ctxt);
+
+       ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux);
        ctxt->dst.val = tsc_aux;
        return X86EMUL_CONTINUE;
 }
@@ -3642,7 +3644,7 @@ static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
 
        msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
                | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
-       r = ctxt->ops->set_msr(ctxt, msr_index, msr_data);
+       r = ctxt->ops->set_msr_with_filter(ctxt, msr_index, msr_data);
 
        if (r == X86EMUL_IO_NEEDED)
                return r;
@@ -3659,7 +3661,7 @@ static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
        u64 msr_data;
        int r;
 
-       r = ctxt->ops->get_msr(ctxt, msr_index, &msr_data);
+       r = ctxt->ops->get_msr_with_filter(ctxt, msr_index, &msr_data);
 
        if (r == X86EMUL_IO_NEEDED)
                return r;
index a32f54ab84a20b8e01901d8770289001718f36a5..46f9dfb6046947ac62a2c86ef18e0b6ef591a2f4 100644 (file)
@@ -122,9 +122,13 @@ static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
        else
                hv->synic_auto_eoi_used--;
 
-       __kvm_request_apicv_update(vcpu->kvm,
-                                  !hv->synic_auto_eoi_used,
-                                  APICV_INHIBIT_REASON_HYPERV);
+       /*
+        * Inhibit APICv if any vCPU is using SynIC's AutoEOI, which relies on
+        * the hypervisor to manually inject IRQs.
+        */
+       __kvm_set_or_clear_apicv_inhibit(vcpu->kvm,
+                                        APICV_INHIBIT_REASON_HYPERV,
+                                        !!hv->synic_auto_eoi_used);
 
        up_write(&vcpu->kvm->arch.apicv_update_lock);
 }
@@ -239,7 +243,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
        struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
        int ret;
 
-       if (!synic->active && !host)
+       if (!synic->active && (!host || data))
                return 1;
 
        trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
@@ -285,6 +289,9 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
        case HV_X64_MSR_EOM: {
                int i;
 
+               if (!synic->active)
+                       break;
+
                for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                        kvm_hv_notify_acked_sint(vcpu, i);
                break;
@@ -449,6 +456,9 @@ static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
        struct kvm_lapic_irq irq;
        int ret, vector;
 
+       if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm))
+               return -EINVAL;
+
        if (sint >= ARRAY_SIZE(synic->sint))
                return -EINVAL;
 
@@ -661,7 +671,7 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
 
-       if (!synic->active && !host)
+       if (!synic->active && (!host || config))
                return 1;
 
        if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
@@ -690,7 +700,7 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
        struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
        struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
 
-       if (!synic->active && !host)
+       if (!synic->active && (!host || count))
                return 1;
 
        trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id,
@@ -1125,11 +1135,13 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
        BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
        BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
 
+       mutex_lock(&hv->hv_lock);
+
        if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+           hv->hv_tsc_page_status == HV_TSC_PAGE_SET ||
            hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
-               return;
+               goto out_unlock;
 
-       mutex_lock(&hv->hv_lock);
        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                goto out_unlock;
 
@@ -1191,45 +1203,19 @@ out_unlock:
        mutex_unlock(&hv->hv_lock);
 }
 
-void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
+void kvm_hv_request_tsc_page_update(struct kvm *kvm)
 {
        struct kvm_hv *hv = to_kvm_hv(kvm);
-       u64 gfn;
-       int idx;
-
-       if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
-           hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
-           tsc_page_update_unsafe(hv))
-               return;
 
        mutex_lock(&hv->hv_lock);
 
-       if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
-               goto out_unlock;
-
-       /* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */
-       if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
-               hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;
-
-       gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
-
-       hv->tsc_ref.tsc_sequence = 0;
-
-       /*
-        * Take the srcu lock as memslots will be accessed to check the gfn
-        * cache generation against the memslots generation.
-        */
-       idx = srcu_read_lock(&kvm->srcu);
-       if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
-                           &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
-               hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
-       srcu_read_unlock(&kvm->srcu, idx);
+       if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET &&
+           !tsc_page_update_unsafe(hv))
+               hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
 
-out_unlock:
        mutex_unlock(&hv->hv_lock);
 }
 
-
 static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
 {
        if (!hv_vcpu->enforce_cpuid)
index e19c00ee9ab33c14974af99d4a65db9432d0a2a1..da2737f2a956c2ab054af924b83012e4960997f6 100644 (file)
@@ -137,7 +137,7 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);
 
 void kvm_hv_setup_tsc_page(struct kvm *kvm,
                           struct pvclock_vcpu_time_info *hv_clock);
-void kvm_hv_invalidate_tsc_page(struct kvm *kvm);
+void kvm_hv_request_tsc_page_update(struct kvm *kvm);
 
 void kvm_hv_init_vm(struct kvm *kvm);
 void kvm_hv_destroy_vm(struct kvm *kvm);
index 0b65a764ed3a1741b6fa550ecb469f800621d327..1c83076091af3810ce3243312cb7b553bb587c6d 100644 (file)
@@ -305,15 +305,13 @@ void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject)
         * So, deactivate APICv when PIT is in reinject mode.
         */
        if (reinject) {
-               kvm_request_apicv_update(kvm, false,
-                                        APICV_INHIBIT_REASON_PIT_REINJ);
+               kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);
                /* The initial state is preserved while ps->reinject == 0. */
                kvm_pit_reset_reinject(pit);
                kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
                kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
        } else {
-               kvm_request_apicv_update(kvm, true,
-                                        APICV_INHIBIT_REASON_PIT_REINJ);
+               kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);
                kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
                kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
        }
index 840ddb4a93025de7b8f070b02beb05ae3661bf4b..8dff25d267b79bb0485bb46a6bbe7720f076d9ae 100644 (file)
@@ -210,6 +210,8 @@ struct x86_emulate_ops {
        int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
        u64 (*get_smbase)(struct x86_emulate_ctxt *ctxt);
        void (*set_smbase)(struct x86_emulate_ctxt *ctxt, u64 smbase);
+       int (*set_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
+       int (*get_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
        int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
        int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
        int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
@@ -226,6 +228,7 @@ struct x86_emulate_ops {
        bool (*guest_has_long_mode)(struct x86_emulate_ctxt *ctxt);
        bool (*guest_has_movbe)(struct x86_emulate_ctxt *ctxt);
        bool (*guest_has_fxsr)(struct x86_emulate_ctxt *ctxt);
+       bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt);
 
        void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
 
index 80a2020c4db401e94d1e189f0b6c505fee06caf7..66b0eb0bda94e9149de3ae788e340923807bb0f9 100644 (file)
@@ -1024,6 +1024,10 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
        *r = -1;
 
        if (irq->shorthand == APIC_DEST_SELF) {
+               if (KVM_BUG_ON(!src, kvm)) {
+                       *r = 0;
+                       return true;
+               }
                *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
                return true;
        }
index bf8dbc4bb12aa2ae438ca7c7f766e26a6a09908e..e6cae6f226838c02919e60ff2d35719d90165c42 100644 (file)
@@ -214,27 +214,27 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
  */
 static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                  unsigned pte_access, unsigned pte_pkey,
-                                 unsigned pfec)
+                                 u64 access)
 {
-       int cpl = static_call(kvm_x86_get_cpl)(vcpu);
+       /* strip nested paging fault error codes */
+       unsigned int pfec = access;
        unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
 
        /*
-        * If CPL < 3, SMAP prevention are disabled if EFLAGS.AC = 1.
+        * For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1.
+        * For implicit supervisor accesses, SMAP cannot be overridden.
         *
-        * If CPL = 3, SMAP applies to all supervisor-mode data accesses
-        * (these are implicit supervisor accesses) regardless of the value
-        * of EFLAGS.AC.
+        * SMAP applies only to supervisor accesses; for user accesses,
+        * not_smap may be either set or clear, with no bearing on the
+        * result.
         *
-        * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
-        * the result in X86_EFLAGS_AC. We then insert it in place of
-        * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
-        * but it will be one in index if SMAP checks are being overridden.
-        * It is important to keep this branchless.
+        * We put the SMAP checking bit in place of the PFERR_RSVD_MASK bit;
+        * this bit will always be zero in pfec, but it will be one in index
+        * if SMAP checks are being disabled.
         */
-       unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
-       int index = (pfec >> 1) +
-                   (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
+       u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
+       bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
+       int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
        bool fault = (mmu->permissions[index] >> pte_access) & 1;
        u32 errcode = PFERR_PRESENT_MASK;
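
The branchless computation above can be sanity-checked in isolation. A minimal userspace sketch, assuming the usual x86 constants (X86_EFLAGS_AC is bit 18, PFERR_RSVD_BIT is 3) and bit 48 for the new PFERR_IMPLICIT_ACCESS:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define X86_EFLAGS_AC           (1UL << 18)
    #define PFERR_RSVD_BIT          3
    #define PFERR_IMPLICIT_ACCESS   (1ULL << 48)    /* assumed bit position */

    /* Mirrors the index computation in permission_fault(). */
    static int smap_index(uint64_t access, unsigned long rflags)
    {
            unsigned int pfec = access;     /* strips the implicit-access bit */
            uint64_t implicit = access & PFERR_IMPLICIT_ACCESS;
            bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit) == X86_EFLAGS_AC;

            return (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
    }

    int main(void)
    {
            /* Explicit supervisor access with EFLAGS.AC=1: SMAP overridden. */
            printf("%d\n", smap_index(0, X86_EFLAGS_AC));                       /* 4 */
            /* Implicit supervisor access: EFLAGS.AC cannot override SMAP. */
            printf("%d\n", smap_index(PFERR_IMPLICIT_ACCESS, X86_EFLAGS_AC));   /* 0 */
            return 0;
    }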
 
@@ -317,12 +317,12 @@ static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
        atomic64_add(count, &kvm->stat.pages[level - 1]);
 }
 
-gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
                           struct x86_exception *exception);
 
 static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
                                      struct kvm_mmu *mmu,
-                                     gpa_t gpa, u32 access,
+                                     gpa_t gpa, u64 access,
                                      struct x86_exception *exception)
 {
        if (mmu != &vcpu->arch.nested_mmu)
index 51671cb34fb6aac3eaf3fcb5de438d8561c861b5..f9080ee50ffa0933b055122cd018eb15c5c85800 100644 (file)
@@ -2696,8 +2696,8 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
        if (*sptep == spte) {
                ret = RET_PF_SPURIOUS;
        } else {
-               trace_kvm_mmu_set_spte(level, gfn, sptep);
                flush |= mmu_spte_update(sptep, spte);
+               trace_kvm_mmu_set_spte(level, gfn, sptep);
        }
 
        if (wrprot) {
@@ -3703,7 +3703,7 @@ void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
 }
 
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-                                 gpa_t vaddr, u32 access,
+                                 gpa_t vaddr, u64 access,
                                  struct x86_exception *exception)
 {
        if (exception)
@@ -4591,11 +4591,11 @@ static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
                         *   - X86_CR4_SMAP is set in CR4
                         *   - A user page is accessed
                         *   - The access is not a fetch
-                        *   - Page fault in kernel mode
-                        *   - if CPL = 3 or X86_EFLAGS_AC is clear
+                        *   - The access is supervisor mode
+                        *   - If implicit supervisor access or X86_EFLAGS_AC is clear
                         *
-                        * Here, we cover the first three conditions.
-                        * The fourth is computed dynamically in permission_fault();
+                        * Here, we cover the first four conditions.
+                        * The fifth is computed dynamically in permission_fault();
                         * PFERR_RSVD_MASK bit will be set in PFEC if the access is
                         * *not* subject to SMAP restrictions.
                         */
@@ -5768,17 +5768,24 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
        kvm_mmu_zap_all_fast(kvm);
 }
 
-void kvm_mmu_init_vm(struct kvm *kvm)
+int kvm_mmu_init_vm(struct kvm *kvm)
 {
        struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+       int r;
 
+       INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
+       INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
+       INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
        spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
 
-       kvm_mmu_init_tdp_mmu(kvm);
+       r = kvm_mmu_init_tdp_mmu(kvm);
+       if (r < 0)
+               return r;
 
        node->track_write = kvm_mmu_pte_write;
        node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
        kvm_page_track_register_notifier(kvm, node);
+       return 0;
 }
 
 void kvm_mmu_uninit_vm(struct kvm *kvm)
@@ -5842,8 +5849,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 
        if (is_tdp_mmu_enabled(kvm)) {
                for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-                       flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
-                                                         gfn_end, flush);
+                       flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
+                                                     gfn_end, true, flush);
        }
 
        if (flush)
@@ -6230,12 +6237,24 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
        return 0;
 }
 
-int kvm_mmu_module_init(void)
+/*
+ * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
+ * its default value of -1 is technically undefined behavior for a boolean.
+ */
+void kvm_mmu_x86_module_init(void)
 {
-       int ret = -ENOMEM;
-
        if (nx_huge_pages == -1)
                __set_nx_huge_pages(get_nx_auto_mode());
+}
+
+/*
+ * The bulk of the MMU initialization is deferred until the vendor module is
+ * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
+ * to be reset when a potentially different vendor module is loaded.
+ */
+int kvm_mmu_vendor_module_init(void)
+{
+       int ret = -ENOMEM;
 
        /*
         * MMU roles use union aliasing which is, generally speaking, an
@@ -6283,7 +6302,7 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
        mmu_free_memory_caches(vcpu);
 }
 
-void kvm_mmu_module_exit(void)
+void kvm_mmu_vendor_module_exit(void)
 {
        mmu_destroy_caches();
        percpu_counter_destroy(&kvm_total_used_mmu_pages);
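
The split leaves two entry points: kvm_mmu_x86_module_init() runs once when kvm.ko loads and resolves the tri-state nx_huge_pages parameter, while kvm_mmu_vendor_module_init() re-runs every time kvm_intel/kvm_amd loads. A compressed, runnable model of the two-phase flow (stubbed defaults assumed):

    #include <stdio.h>

    static int nx_huge_pages = -1;  /* module param: -1 = auto */

    static int get_nx_auto_mode(void) { return 0; }  /* assumed default */

    /* Phase 1: once per kvm.ko load; make the tri-state a real bool. */
    static void x86_module_init(void)
    {
            if (nx_huge_pages == -1)
                    nx_huge_pages = get_nx_auto_mode();
    }

    /* Phase 2: once per vendor module load; masks may differ each time. */
    static int vendor_module_init(void)
    {
            return 0;       /* caches, masks, etc. would be (re)built here */
    }

    int main(void)
    {
            x86_module_init();
            printf("%d %d\n", nx_huge_pages, vendor_module_init());  /* 0 0 */
            return 0;
    }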
index 252c77805eb9387b030b5121204c79f3f621c117..01fee5f67ac370691e17178f049e45ec0befd516 100644 (file)
@@ -34,9 +34,8 @@
        #define PT_HAVE_ACCESSED_DIRTY(mmu) true
        #ifdef CONFIG_X86_64
        #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
-       #define CMPXCHG cmpxchg
+       #define CMPXCHG "cmpxchgq"
        #else
-       #define CMPXCHG cmpxchg64
        #define PT_MAX_FULL_LEVELS 2
        #endif
 #elif PTTYPE == 32
@@ -52,7 +51,7 @@
        #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
        #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
        #define PT_HAVE_ACCESSED_DIRTY(mmu) true
-       #define CMPXCHG cmpxchg
+       #define CMPXCHG "cmpxchgl"
 #elif PTTYPE == PTTYPE_EPT
        #define pt_element_t u64
        #define guest_walker guest_walkerEPT
@@ -65,7 +64,9 @@
        #define PT_GUEST_DIRTY_SHIFT 9
        #define PT_GUEST_ACCESSED_SHIFT 8
        #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
-       #define CMPXCHG cmpxchg64
+       #ifdef CONFIG_X86_64
+       #define CMPXCHG "cmpxchgq"
+       #endif
        #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
 #else
        #error Invalid PTTYPE value
@@ -147,43 +148,36 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                               pt_element_t __user *ptep_user, unsigned index,
                               pt_element_t orig_pte, pt_element_t new_pte)
 {
-       int npages;
-       pt_element_t ret;
-       pt_element_t *table;
-       struct page *page;
-
-       npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
-       if (likely(npages == 1)) {
-               table = kmap_atomic(page);
-               ret = CMPXCHG(&table[index], orig_pte, new_pte);
-               kunmap_atomic(table);
-
-               kvm_release_page_dirty(page);
-       } else {
-               struct vm_area_struct *vma;
-               unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK;
-               unsigned long pfn;
-               unsigned long paddr;
-
-               mmap_read_lock(current->mm);
-               vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
-               if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
-                       mmap_read_unlock(current->mm);
-                       return -EFAULT;
-               }
-               pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-               paddr = pfn << PAGE_SHIFT;
-               table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
-               if (!table) {
-                       mmap_read_unlock(current->mm);
-                       return -EFAULT;
-               }
-               ret = CMPXCHG(&table[index], orig_pte, new_pte);
-               memunmap(table);
-               mmap_read_unlock(current->mm);
-       }
+       signed char r;
 
-       return (ret != orig_pte);
+       if (!user_access_begin(ptep_user, sizeof(pt_element_t)))
+               return -EFAULT;
+
+#ifdef CMPXCHG
+       asm volatile("1:" LOCK_PREFIX CMPXCHG " %[new], %[ptr]\n"
+                    "setnz %b[r]\n"
+                    "2:"
+                    _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
+                    : [ptr] "+m" (*ptep_user),
+                      [old] "+a" (orig_pte),
+                      [r] "=q" (r)
+                    : [new] "r" (new_pte)
+                    : "memory");
+#else
+       asm volatile("1:" LOCK_PREFIX "cmpxchg8b %[ptr]\n"
+                    "setnz %b[r]\n"
+                    "2:"
+                    _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
+                    : [ptr] "+m" (*ptep_user),
+                      [old] "+A" (orig_pte),
+                      [r] "=q" (r)
+                    : [new_lo] "b" ((u32)new_pte),
+                      [new_hi] "c" ((u32)(new_pte >> 32))
+                    : "memory");
+#endif
+
+       user_access_end();
+       return r;
 }
 
 static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
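
The rewritten cmpxchg_gpte() returns 0 when the gPTE was exchanged, non-zero when the compare failed (the setnz path), and -EFAULT if the user access faults. A rough userspace model of that convention, substituting a compiler builtin for the exception-table asm:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pt_element_t;

    /* 0 = exchanged, 1 = gPTE changed underneath us (setnz outcome). */
    static int cmpxchg_gpte_model(pt_element_t *ptep,
                                  pt_element_t orig, pt_element_t new)
    {
            return !__atomic_compare_exchange_n(ptep, &orig, new, 0,
                                                __ATOMIC_SEQ_CST,
                                                __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
            pt_element_t pte = 0x1000;

            printf("%d\n", cmpxchg_gpte_model(&pte, 0x1000, 0x1001));  /* 0 */
            printf("%d\n", cmpxchg_gpte_model(&pte, 0x1000, 0x1002));  /* 1 */
            return 0;
    }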
@@ -339,7 +333,7 @@ static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu,
  */
 static int FNAME(walk_addr_generic)(struct guest_walker *walker,
                                    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-                                   gpa_t addr, u32 access)
+                                   gpa_t addr, u64 access)
 {
        int ret;
        pt_element_t pte;
@@ -347,7 +341,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
        gfn_t table_gfn;
        u64 pt_access, pte_access;
        unsigned index, accessed_dirty, pte_pkey;
-       unsigned nested_access;
+       u64 nested_access;
        gpa_t pte_gpa;
        bool have_ad;
        int offset;
@@ -540,7 +534,7 @@ error:
 }
 
 static int FNAME(walk_addr)(struct guest_walker *walker,
-                           struct kvm_vcpu *vcpu, gpa_t addr, u32 access)
+                           struct kvm_vcpu *vcpu, gpa_t addr, u64 access)
 {
        return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
                                        access);
@@ -988,7 +982,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 
 /* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-                              gpa_t addr, u32 access,
+                              gpa_t addr, u64 access,
                               struct x86_exception *exception)
 {
        struct guest_walker walker;
index e7e7876251b35ee33458176a4ea406f194f0cf76..c472769e0300513d2409f3b0a964565bb55dc709 100644 (file)
@@ -14,21 +14,24 @@ static bool __read_mostly tdp_mmu_enabled = true;
 module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
 
 /* Initializes the TDP MMU for the VM, if enabled. */
-bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
+int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
 {
+       struct workqueue_struct *wq;
+
        if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
-               return false;
+               return 0;
+
+       wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
+       if (!wq)
+               return -ENOMEM;
 
        /* This should not be changed for the lifetime of the VM. */
        kvm->arch.tdp_mmu_enabled = true;
-
        INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
        spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
        INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
-       kvm->arch.tdp_mmu_zap_wq =
-               alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
-
-       return true;
+       kvm->arch.tdp_mmu_zap_wq = wq;
+       return 1;
 }
 
 /* Arbitrarily returns true so that this may be used in if statements. */
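
kvm_mmu_init_tdp_mmu() now has a tri-state result: 0 leaves the TDP MMU disabled, 1 enables it, and -ENOMEM reports a workqueue allocation failure, matching the 'if (r < 0) return r' caller in the kvm_mmu_init_vm() hunk earlier. A runnable sketch of that contract (stubbed, not KVM code):

    #include <errno.h>
    #include <stdio.h>

    /* Stub standing in for kvm_mmu_init_tdp_mmu(). */
    static int init_tdp_mmu_stub(int outcome) { return outcome; }

    /* Caller contract: <0 fatal, 0 TDP MMU disabled, 1 TDP MMU enabled. */
    static int init_vm_model(int outcome)
    {
            int r = init_tdp_mmu_stub(outcome);

            if (r < 0)
                    return r;       /* e.g. -ENOMEM from alloc_workqueue() */
            return 0;               /* 0 and 1 both mean success */
    }

    int main(void)
    {
            printf("%d %d %d\n", init_vm_model(-ENOMEM),
                   init_vm_model(0), init_vm_model(1));  /* -12 0 0 */
            return 0;
    }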
@@ -48,7 +51,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
        if (!kvm->arch.tdp_mmu_enabled)
                return;
 
-       flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
+       /* Also waits for any queued work items.  */
        destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
 
        WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
@@ -906,10 +909,8 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 }
 
 /*
- * Tears down the mappings for the range of gfns, [start, end), and frees the
- * non-root pages mapping GFNs strictly within that range. Returns true if
- * SPTEs have been cleared and a TLB flush is needed before releasing the
- * MMU lock.
+ * Zap leaf SPTEs for the range of gfns, [start, end). Returns true if SPTEs
+ * have been cleared and a TLB flush is needed before releasing the MMU lock.
  *
  * If can_yield is true, will release the MMU lock and reschedule if the
  * scheduler needs the CPU or there is contention on the MMU lock. If this
@@ -917,42 +918,25 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
  * the caller must ensure it does not supply too large a GFN range, or the
  * operation can cause a soft lockup.
  */
-static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-                         gfn_t start, gfn_t end, bool can_yield, bool flush)
+static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
+                             gfn_t start, gfn_t end, bool can_yield, bool flush)
 {
-       bool zap_all = (start == 0 && end >= tdp_mmu_max_gfn_host());
        struct tdp_iter iter;
 
-       /*
-        * No need to try to step down in the iterator when zapping all SPTEs,
-        * zapping the top-level non-leaf SPTEs will recurse on their children.
-        */
-       int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
-
        end = min(end, tdp_mmu_max_gfn_host());
 
        lockdep_assert_held_write(&kvm->mmu_lock);
 
        rcu_read_lock();
 
-       for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
+       for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
                if (can_yield &&
                    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
                        flush = false;
                        continue;
                }
 
-               if (!is_shadow_present_pte(iter.old_spte))
-                       continue;
-
-               /*
-                * If this is a non-last-level SPTE that covers a larger range
-                * than should be zapped, continue, and zap the mappings at a
-                * lower level, except when zapping all SPTEs.
-                */
-               if (!zap_all &&
-                   (iter.gfn < start ||
-                    iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
+               if (!is_shadow_present_pte(iter.old_spte) ||
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;
 
@@ -960,17 +944,13 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                flush = true;
        }
 
-       /*
-        * Need to flush before releasing RCU.  TODO: do it only if intermediate
-        * page tables were zapped; there is no need to flush under RCU protection
-        * if no 'struct kvm_mmu_page' is freed.
-        */
-       if (flush)
-               kvm_flush_remote_tlbs_with_address(kvm, start, end - start);
-
        rcu_read_unlock();
 
-       return false;
+       /*
+        * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
+        * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
+        */
+       return flush;
 }
 
 /*
@@ -979,13 +959,13 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
  * SPTEs have been cleared and a TLB flush is needed before releasing the
  * MMU lock.
  */
-bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
-                                gfn_t end, bool can_yield, bool flush)
+bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
+                          bool can_yield, bool flush)
 {
        struct kvm_mmu_page *root;
 
        for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
-               flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
+               flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush);
 
        return flush;
 }
@@ -1233,8 +1213,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
                                 bool flush)
 {
-       return __kvm_tdp_mmu_zap_gfn_range(kvm, range->slot->as_id, range->start,
-                                          range->end, range->may_block, flush);
+       return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start,
+                                    range->end, range->may_block, flush);
 }
 
 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
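
With the flush hoisted out of tdp_mmu_zap_leafs(), each pass only accumulates into 'flush' and the outermost caller issues a single TLB flush, as in the kvm_zap_gfn_range() hunk above. A schematic model of the accumulate-then-flush contract:

    #include <stdbool.h>
    #include <stdio.h>

    /* Each zap pass reports whether it cleared anything; no flushing here. */
    static bool zap_leafs_model(bool flush, bool zapped_something)
    {
            return flush || zapped_something;
    }

    int main(void)
    {
            bool flush = false;

            flush = zap_leafs_model(flush, false);  /* address space 0 */
            flush = zap_leafs_model(flush, true);   /* address space 1 */
            if (flush)
                    printf("flush remote TLBs once, here\n");
            return 0;
    }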
index 5e5ef2576c811a91492477b0e6dde3785fccbb8b..c163f7cc23ca54aa93d466447d9aaf7ab176bf41 100644 (file)
@@ -15,14 +15,8 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
                          bool shared);
 
-bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
+bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start,
                                 gfn_t end, bool can_yield, bool flush);
-static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
-                                            gfn_t start, gfn_t end, bool flush)
-{
-       return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
-}
-
 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
@@ -72,7 +66,7 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
                                        u64 *spte);
 
 #ifdef CONFIG_X86_64
-bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
 
@@ -93,7 +87,7 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
        return sp && is_tdp_mmu_page(sp) && sp->root_count;
 }
 #else
-static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
+static inline int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return 0; }
 static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
 static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
index b1a02993782b376cfb5460ce786644115811b0ce..eca39f56c23153556c094104b59ea52a1b1e0d91 100644 (file)
@@ -96,8 +96,7 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
 
 static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
                                  u64 config, bool exclude_user,
-                                 bool exclude_kernel, bool intr,
-                                 bool in_tx, bool in_tx_cp)
+                                 bool exclude_kernel, bool intr)
 {
        struct perf_event *event;
        struct perf_event_attr attr = {
@@ -116,16 +115,14 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 
        attr.sample_period = get_sample_period(pmc, pmc->counter);
 
-       if (in_tx)
-               attr.config |= HSW_IN_TX;
-       if (in_tx_cp) {
+       if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
+           guest_cpuid_is_intel(pmc->vcpu)) {
                /*
                 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
                 * period. Just clear the sample period so at least
                 * allocating the counter doesn't fail.
                 */
                attr.sample_period = 0;
-               attr.config |= HSW_IN_TX_CHECKPOINTED;
        }
 
        event = perf_event_create_kernel_counter(&attr, -1, current,
@@ -185,6 +182,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
        u32 type = PERF_TYPE_RAW;
        struct kvm *kvm = pmc->vcpu->kvm;
        struct kvm_pmu_event_filter *filter;
+       struct kvm_pmu *pmu = vcpu_to_pmu(pmc->vcpu);
        bool allow_event = true;
 
        if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
@@ -221,7 +219,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
        }
 
        if (type == PERF_TYPE_RAW)
-               config = eventsel & AMD64_RAW_EVENT_MASK;
+               config = eventsel & pmu->raw_event_mask;
 
        if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
                return;
@@ -232,9 +230,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
        pmc_reprogram_counter(pmc, type, config,
                              !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
                              !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
-                             eventsel & ARCH_PERFMON_EVENTSEL_INT,
-                             (eventsel & HSW_IN_TX),
-                             (eventsel & HSW_IN_TX_CHECKPOINTED));
+                             eventsel & ARCH_PERFMON_EVENTSEL_INT);
 }
 EXPORT_SYMBOL_GPL(reprogram_gp_counter);
 
@@ -270,7 +266,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
                              kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc),
                              !(en_field & 0x2), /* exclude user */
                              !(en_field & 0x1), /* exclude kernel */
-                             pmi, false, false);
+                             pmi);
 }
 EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
 
index b37b353ec0869bda8aaa2661a714f2d6a92061ca..421619540ff9d972ee7a441d7404f61fd5e957ba 100644 (file)
@@ -726,7 +726,7 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
 {
        struct kvm_kernel_irq_routing_entry *e;
        struct kvm_irq_routing_table *irq_rt;
-       int idx, ret = -EINVAL;
+       int idx, ret = 0;
 
        if (!kvm_arch_has_assigned_device(kvm) ||
            !irq_remapping_cap(IRQ_POSTING_CAP))
@@ -737,7 +737,13 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
 
        idx = srcu_read_lock(&kvm->irq_srcu);
        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
-       WARN_ON(guest_irq >= irq_rt->nr_rt_entries);
+
+       if (guest_irq >= irq_rt->nr_rt_entries ||
+           hlist_empty(&irq_rt->map[guest_irq])) {
+               pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
+                            guest_irq, irq_rt->nr_rt_entries);
+               goto out;
+       }
 
        hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
                struct vcpu_data vcpu_info;
@@ -822,7 +828,7 @@ out:
        return ret;
 }
 
-bool avic_check_apicv_inhibit_reasons(ulong bit)
+bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
 {
        ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
                          BIT(APICV_INHIBIT_REASON_ABSENT) |
@@ -831,9 +837,10 @@ bool avic_check_apicv_inhibit_reasons(ulong bit)
                          BIT(APICV_INHIBIT_REASON_IRQWIN) |
                          BIT(APICV_INHIBIT_REASON_PIT_REINJ) |
                          BIT(APICV_INHIBIT_REASON_X2APIC) |
-                         BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
+                         BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |
+                         BIT(APICV_INHIBIT_REASON_SEV);
 
-       return supported & BIT(bit);
+       return supported & BIT(reason);
 }
 
 
index d4de5240933527b23604ed7703d14798a3bab065..24eb935b6f85c309fe0adafc600a4be098e02d16 100644 (file)
@@ -262,12 +262,10 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        /* MSR_EVNTSELn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
-               if (data == pmc->eventsel)
-                       return 0;
-               if (!(data & pmu->reserved_bits)) {
+               data &= ~pmu->reserved_bits;
+               if (data != pmc->eventsel)
                        reprogram_gp_counter(pmc, data);
-                       return 0;
-               }
+               return 0;
        }
 
        return 1;
@@ -284,6 +282,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
 
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
        pmu->reserved_bits = 0xfffffff000280000ull;
+       pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
        pmu->version = 1;
        /* not applicable to AMD; but clean them to prevent any fall out */
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
index 75fa6dd268f056644446ef5db4c596658d08097b..537aaddc852fc420b62bef0c4785bfb7c7b86eec 100644 (file)
@@ -260,6 +260,8 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
        INIT_LIST_HEAD(&sev->regions_list);
        INIT_LIST_HEAD(&sev->mirror_vms);
 
+       kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_SEV);
+
        return 0;
 
 e_free:
@@ -465,6 +467,7 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages)
                page_virtual = kmap_atomic(pages[i]);
                clflush_cache_range(page_virtual, PAGE_SIZE);
                kunmap_atomic(page_virtual);
+               cond_resched();
        }
 }
 
index 0884c3414a1b246e43f2d3202ef784d5dfdb350e..bd4c64b362d24a06c9a2e48de0de9803d700c47f 100644 (file)
@@ -62,20 +62,8 @@ MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
 #define SEG_TYPE_LDT 2
 #define SEG_TYPE_BUSY_TSS16 3
 
-#define SVM_FEATURE_LBRV           (1 <<  1)
-#define SVM_FEATURE_SVML           (1 <<  2)
-#define SVM_FEATURE_TSC_RATE       (1 <<  4)
-#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
-#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
-#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
-#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)
-
 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
 
-#define TSC_RATIO_RSVD          0xffffff0000000000ULL
-#define TSC_RATIO_MIN          0x0000000000000001ULL
-#define TSC_RATIO_MAX          0x000000ffffffffffULL
-
 static bool erratum_383_found __read_mostly;
 
 u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
@@ -87,7 +75,6 @@ u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
 static uint64_t osvw_len = 4, osvw_status;
 
 static DEFINE_PER_CPU(u64, current_tsc_ratio);
-#define TSC_RATIO_DEFAULT      0x0100000000ULL
 
 static const struct svm_direct_access_msrs {
        u32 index;   /* Index of the MSR */
@@ -480,7 +467,7 @@ static void svm_hardware_disable(void)
 {
        /* Make sure we clean up behind us */
        if (tsc_scaling)
-               wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
+               wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT);
 
        cpu_svm_disable();
 
@@ -526,8 +513,8 @@ static int svm_hardware_enable(void)
                 * Set the default value, even if we don't use TSC scaling
                 * to avoid having stale value in the msr
                 */
-               wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
-               __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
+               wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT);
+               __this_cpu_write(current_tsc_ratio, SVM_TSC_RATIO_DEFAULT);
        }
 
 
@@ -2723,7 +2710,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                        break;
                }
 
-               if (data & TSC_RATIO_RSVD)
+               if (data & SVM_TSC_RATIO_RSVD)
                        return 1;
 
                svm->tsc_ratio_msr = data;
@@ -2918,7 +2905,7 @@ static int interrupt_window_interception(struct kvm_vcpu *vcpu)
         * In this case AVIC was temporarily disabled for
         * requesting the IRQ window and we have to re-enable it.
         */
-       kvm_request_apicv_update(vcpu->kvm, true, APICV_INHIBIT_REASON_IRQWIN);
+       kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
 
        ++vcpu->stat.irq_window_exits;
        return 1;
@@ -3516,7 +3503,7 @@ static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
                 * via AVIC. In such case, we need to temporarily disable AVIC,
                 * and fallback to injecting IRQ via V_IRQ.
                 */
-               kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_IRQWIN);
+               kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
                svm_set_vintr(svm);
        }
 }
@@ -3948,6 +3935,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct kvm_cpuid_entry2 *best;
+       struct kvm *kvm = vcpu->kvm;
 
        vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
                                    boot_cpu_has(X86_FEATURE_XSAVE) &&
@@ -3974,16 +3962,14 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
                 * is exposed to the guest, disable AVIC.
                 */
                if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
-                       kvm_request_apicv_update(vcpu->kvm, false,
-                                                APICV_INHIBIT_REASON_X2APIC);
+                       kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_X2APIC);
 
                /*
                 * Currently, AVIC does not work with nested virtualization.
                 * So, we disable AVIC when cpuid for SVM is set in the L1 guest.
                 */
                if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
-                       kvm_request_apicv_update(vcpu->kvm, false,
-                                                APICV_INHIBIT_REASON_NESTED);
+                       kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_NESTED);
        }
        init_vmcb_after_set_cpuid(vcpu);
 }
@@ -4766,10 +4752,10 @@ static __init int svm_hardware_setup(void)
                } else {
                        pr_info("TSC scaling supported\n");
                        kvm_has_tsc_control = true;
-                       kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
-                       kvm_tsc_scaling_ratio_frac_bits = 32;
                }
        }
+       kvm_max_tsc_scaling_ratio = SVM_TSC_RATIO_MAX;
+       kvm_tsc_scaling_ratio_frac_bits = 32;
 
        tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
 
index e37bb3508cfac4ba97a49b16fe6a936afe0632af..f77a7d2d39dd6dfe30b94b5cca2665435f378a00 100644 (file)
@@ -22,6 +22,8 @@
 #include <asm/svm.h>
 #include <asm/sev-common.h>
 
+#include "kvm_cache_regs.h"
+
 #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
 
 #define        IOPM_SIZE PAGE_SIZE * 3
@@ -569,17 +571,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
 
 /* avic.c */
 
-#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK   (0xFF)
-#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT                        31
-#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK               (1 << 31)
-
-#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK   GENMASK_ULL(11, 0)
-#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK       (0xFFFFFFFFFFULL << 12)
-#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK         (1ULL << 62)
-#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK              (1ULL << 63)
-
-#define VMCB_AVIC_APIC_BAR_MASK                0xFFFFFFFFFF000ULL
-
 int avic_ga_log_notifier(u32 ga_tag);
 void avic_vm_destroy(struct kvm *kvm);
 int avic_vm_init(struct kvm *kvm);
@@ -592,7 +583,7 @@ void __avic_vcpu_put(struct kvm_vcpu *vcpu);
 void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
 void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
-bool avic_check_apicv_inhibit_reasons(ulong bit);
+bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason);
 void avic_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
 void avic_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
 bool avic_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
index 98aa981c04ec5ff8db55e6f1dfa20cc912af4cc0..8cdc62c74a964e4369d0d1a4e09c60cde740095b 100644 (file)
@@ -4,7 +4,6 @@
  */
 
 #include <linux/kvm_host.h>
-#include "kvm_cache_regs.h"
 
 #include <asm/mshyperv.h>
 
index 193f5ba930d12f422c195eecaf00006d023bbc15..e3a24b8f04be83d4425956567ddfc36593bbaf1c 100644 (file)
@@ -1339,23 +1339,25 @@ TRACE_EVENT(kvm_hv_stimer_cleanup,
                  __entry->vcpu_id, __entry->timer_index)
 );
 
-TRACE_EVENT(kvm_apicv_update_request,
-           TP_PROTO(bool activate, unsigned long bit),
-           TP_ARGS(activate, bit),
+TRACE_EVENT(kvm_apicv_inhibit_changed,
+           TP_PROTO(int reason, bool set, unsigned long inhibits),
+           TP_ARGS(reason, set, inhibits),
 
        TP_STRUCT__entry(
-               __field(bool, activate)
-               __field(unsigned long, bit)
+               __field(int, reason)
+               __field(bool, set)
+               __field(unsigned long, inhibits)
        ),
 
        TP_fast_assign(
-               __entry->activate = activate;
-               __entry->bit = bit;
+               __entry->reason = reason;
+               __entry->set = set;
+               __entry->inhibits = inhibits;
        ),
 
-       TP_printk("%s bit=%lu",
-                 __entry->activate ? "activate" : "deactivate",
-                 __entry->bit)
+       TP_printk("%s reason=%u, inhibits=0x%lx",
+                 __entry->set ? "set" : "cleared",
+                 __entry->reason, __entry->inhibits)
 );
 
 TRACE_EVENT(kvm_apicv_accept_irq,
index 0684e519181bef41c78ccd9cd6c2ad5002d81685..bc3f8512bb646d76339886342699d369dd827a5b 100644 (file)
@@ -389,6 +389,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;
+       u64 reserved_bits;
 
        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
@@ -443,7 +444,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        if (data == pmc->eventsel)
                                return 0;
-                       if (!(data & pmu->reserved_bits)) {
+                       reserved_bits = pmu->reserved_bits;
+                       if ((pmc->idx == 2) &&
+                           (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
+                               reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
+                       if (!(data & reserved_bits)) {
                                reprogram_gp_counter(pmc, data);
                                return 0;
                        }
@@ -485,6 +490,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->version = 0;
        pmu->reserved_bits = 0xffffffff00200000ull;
+       pmu->raw_event_mask = X86_RAW_EVENT_MASK;
 
        entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
        if (!entry || !vcpu->kvm->arch.enable_pmu)
@@ -533,8 +539,10 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        entry = kvm_find_cpuid_entry(vcpu, 7, 0);
        if (entry &&
            (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
-           (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
-               pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
+           (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
+               pmu->reserved_bits ^= HSW_IN_TX;
+               pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
+       }
 
        bitmap_set(pmu->all_valid_pmc_idx,
                0, pmu->nr_arch_gp_counters);
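
Net effect of the reserved_bits/raw_event_mask split: once HLE/RTM is exposed, HSW_IN_TX becomes guest-writable and both TSX bits survive the raw-event masking in reprogram_gp_counter(). A small sketch using the Haswell bit positions (bits 32/33); the base X86_RAW_EVENT_MASK value below is assumed:

    #include <stdint.h>
    #include <stdio.h>

    #define HSW_IN_TX               (1ULL << 32)
    #define HSW_IN_TX_CHECKPOINTED  (1ULL << 33)

    int main(void)
    {
            /* Starting values from intel_pmu_refresh(); raw mask assumed. */
            uint64_t reserved_bits  = 0xffffffff00200000ull;
            uint64_t raw_event_mask = 0x00000000ff84ffffull;

            /* HLE/RTM exposed: IN_TX becomes writable, both bits kept. */
            reserved_bits  ^= HSW_IN_TX;
            raw_event_mask |= HSW_IN_TX | HSW_IN_TX_CHECKPOINTED;

            uint64_t eventsel = HSW_IN_TX | 0xc0;  /* count only inside TX */
            printf("rejected=%d config=%#llx\n",
                   !!(eventsel & reserved_bits),
                   (unsigned long long)(eventsel & raw_event_mask));
            /* rejected=0 config=0x1000000c0 */
            return 0;
    }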
index e8963f5af6183467ee24569a1301122f56358d69..04d170c4b61eb48688b85d60b961d5b539509cb4 100644 (file)
@@ -2866,21 +2866,17 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       struct vmx_uret_msr *msr = vmx_find_uret_msr(vmx, MSR_EFER);
 
        /* Nothing to do if hardware doesn't support EFER. */
-       if (!msr)
+       if (!vmx_find_uret_msr(vmx, MSR_EFER))
                return 0;
 
        vcpu->arch.efer = efer;
-       if (efer & EFER_LMA) {
-               vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
-               msr->data = efer;
-       } else {
-               vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
+       if (efer & EFER_LMA)
+               vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
+       else
+               vm_entry_controls_clearbit(vmx, VM_ENTRY_IA32E_MODE);
 
-               msr->data = efer & ~EFER_LME;
-       }
        vmx_setup_uret_msrs(vmx);
        return 0;
 }
@@ -2906,7 +2902,6 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
 
 static void exit_lmode(struct kvm_vcpu *vcpu)
 {
-       vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
        vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
 }
 
@@ -7705,14 +7700,14 @@ static void vmx_hardware_unsetup(void)
        free_kvm_area();
 }
 
-static bool vmx_check_apicv_inhibit_reasons(ulong bit)
+static bool vmx_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
 {
        ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
                          BIT(APICV_INHIBIT_REASON_ABSENT) |
                          BIT(APICV_INHIBIT_REASON_HYPERV) |
                          BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
 
-       return supported & BIT(bit);
+       return supported & BIT(reason);
 }
 
 static struct kvm_x86_ops vmx_x86_ops __initdata = {
@@ -7980,12 +7975,11 @@ static __init int hardware_setup(void)
        if (!enable_apicv)
                vmx_x86_ops.sync_pir_to_irr = NULL;
 
-       if (cpu_has_vmx_tsc_scaling()) {
+       if (cpu_has_vmx_tsc_scaling())
                kvm_has_tsc_control = true;
-               kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
-               kvm_tsc_scaling_ratio_frac_bits = 48;
-       }
 
+       kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
+       kvm_tsc_scaling_ratio_frac_bits = 48;
        kvm_has_bus_lock_exit = cpu_has_vmx_bus_lock_detection();
 
        set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
index 02cf0a7e1d144f31202404009148b5c06e0f4bf8..547ba00ef64fc3d12f2e3457221b2b96685688e0 100644 (file)
@@ -1748,9 +1748,6 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
 {
        struct msr_data msr;
 
-       if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
-               return KVM_MSR_RET_FILTERED;
-
        switch (index) {
        case MSR_FS_BASE:
        case MSR_GS_BASE:
@@ -1832,9 +1829,6 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
        struct msr_data msr;
        int ret;
 
-       if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
-               return KVM_MSR_RET_FILTERED;
-
        switch (index) {
        case MSR_TSC_AUX:
                if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
@@ -1871,6 +1865,20 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
        return ret;
 }
 
+static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+{
+       if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
+               return KVM_MSR_RET_FILTERED;
+       return kvm_get_msr_ignored_check(vcpu, index, data, false);
+}
+
+static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
+{
+       if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
+               return KVM_MSR_RET_FILTERED;
+       return kvm_set_msr_ignored_check(vcpu, index, data, false);
+}
+
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
 {
        return kvm_get_msr_ignored_check(vcpu, index, data, false);
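
After this split, only guest-initiated accesses (RDMSR/WRMSR exits and the emulator's new *_with_filter ops) consult the userspace MSR filter; kvm_get_msr()/kvm_set_msr() stay unfiltered for KVM-internal use. A toy model of the layering, with a hypothetical allowed() predicate standing in for kvm_msr_allowed():

    #include <stdbool.h>
    #include <stdio.h>

    #define MSR_RET_FILTERED 3      /* arbitrary stand-in value */

    static bool allowed(unsigned int index)
    {
            return index != 0x10;   /* hypothetical deny-listed MSR */
    }

    /* Unfiltered core accessor: used by KVM-internal callers. */
    static int get_msr(unsigned int index, unsigned long long *data)
    {
            *data = 0;
            return 0;
    }

    /* Guest-initiated path: consult the filter first. */
    static int get_msr_with_filter(unsigned int index, unsigned long long *data)
    {
            if (!allowed(index))
                    return MSR_RET_FILTERED;
            return get_msr(index, data);
    }

    int main(void)
    {
            unsigned long long data;

            printf("%d %d\n", get_msr_with_filter(0x10, &data),
                   get_msr(0x10, &data));   /* 3 0 */
            return 0;
    }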
@@ -1953,7 +1961,7 @@ int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
        u64 data;
        int r;
 
-       r = kvm_get_msr(vcpu, ecx, &data);
+       r = kvm_get_msr_with_filter(vcpu, ecx, &data);
 
        if (!r) {
                trace_kvm_msr_read(ecx, data);
@@ -1978,7 +1986,7 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
        u64 data = kvm_read_edx_eax(vcpu);
        int r;
 
-       r = kvm_set_msr(vcpu, ecx, data);
+       r = kvm_set_msr_with_filter(vcpu, ecx, data);
 
        if (!r) {
                trace_kvm_msr_write(ecx, data);
@@ -2893,7 +2901,7 @@ static void kvm_end_pvclock_update(struct kvm *kvm)
 
 static void kvm_update_masterclock(struct kvm *kvm)
 {
-       kvm_hv_invalidate_tsc_page(kvm);
+       kvm_hv_request_tsc_page_update(kvm);
        kvm_start_pvclock_update(kvm);
        pvclock_update_vm_gtod_copy(kvm);
        kvm_end_pvclock_update(kvm);
@@ -3105,8 +3113,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                                       offsetof(struct compat_vcpu_info, time));
        if (vcpu->xen.vcpu_time_info_set)
                kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
-       if (!v->vcpu_idx)
-               kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
+       kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
        return 0;
 }
 
@@ -5938,7 +5945,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                smp_wmb();
                kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
                kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
-               kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
+               kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
                r = 0;
 split_irqchip_unlock:
                mutex_unlock(&kvm->lock);
@@ -6233,7 +6240,7 @@ static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
        if (data.flags & ~KVM_CLOCK_VALID_FLAGS)
                return -EINVAL;
 
-       kvm_hv_invalidate_tsc_page(kvm);
+       kvm_hv_request_tsc_page_update(kvm);
        kvm_start_pvclock_update(kvm);
        pvclock_update_vm_gtod_copy(kvm);
 
@@ -6335,7 +6342,7 @@ set_identity_unlock:
                /* Write kvm->irq_routing before enabling irqchip_in_kernel. */
                smp_wmb();
                kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
-               kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
+               kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
        create_irqchip_unlock:
                mutex_unlock(&kvm->lock);
                break;
@@ -6726,7 +6733,7 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,
        static_call(kvm_x86_get_segment)(vcpu, var, seg);
 }
 
-gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
                           struct x86_exception *exception)
 {
        struct kvm_mmu *mmu = vcpu->arch.mmu;
@@ -6746,7 +6753,7 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
 {
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
 
-       u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
        return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
@@ -6756,7 +6763,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
 {
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
 
-       u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
        access |= PFERR_FETCH_MASK;
        return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 }
@@ -6766,7 +6773,7 @@ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
 {
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
 
-       u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
        access |= PFERR_WRITE_MASK;
        return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 }
@@ -6782,7 +6789,7 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
 }
 
 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
-                                     struct kvm_vcpu *vcpu, u32 access,
+                                     struct kvm_vcpu *vcpu, u64 access,
                                      struct x86_exception *exception)
 {
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -6819,7 +6826,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
-       u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
        unsigned offset;
        int ret;
 
@@ -6844,7 +6851,7 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
                               gva_t addr, void *val, unsigned int bytes,
                               struct x86_exception *exception)
 {
-       u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
 
        /*
         * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
@@ -6863,9 +6870,11 @@ static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
                             struct x86_exception *exception, bool system)
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-       u32 access = 0;
+       u64 access = 0;
 
-       if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3)
+       if (system)
+               access |= PFERR_IMPLICIT_ACCESS;
+       else if (static_call(kvm_x86_get_cpl)(vcpu) == 3)
                access |= PFERR_USER_MASK;
 
        return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
@@ -6881,7 +6890,7 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
 }
 
 static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
-                                     struct kvm_vcpu *vcpu, u32 access,
+                                     struct kvm_vcpu *vcpu, u64 access,
                                      struct x86_exception *exception)
 {
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -6915,9 +6924,11 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v
                              bool system)
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-       u32 access = PFERR_WRITE_MASK;
+       u64 access = PFERR_WRITE_MASK;
 
-       if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3)
+       if (system)
+               access |= PFERR_IMPLICIT_ACCESS;
+       else if (static_call(kvm_x86_get_cpl)(vcpu) == 3)
                access |= PFERR_USER_MASK;
 
        return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
@@ -6984,7 +6995,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
                                bool write)
 {
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
-       u32 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
+       u64 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
                | (write ? PFERR_WRITE_MASK : 0);
 
        /*
@@ -7627,13 +7638,13 @@ static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
        return;
 }
 
-static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
-                           u32 msr_index, u64 *pdata)
+static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt,
+                                       u32 msr_index, u64 *pdata)
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
        int r;
 
-       r = kvm_get_msr(vcpu, msr_index, pdata);
+       r = kvm_get_msr_with_filter(vcpu, msr_index, pdata);
 
        if (r && kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0,
                                    complete_emulated_rdmsr, r)) {
@@ -7644,13 +7655,13 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
        return r;
 }
 
-static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
-                           u32 msr_index, u64 data)
+static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
+                                       u32 msr_index, u64 data)
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
        int r;
 
-       r = kvm_set_msr(vcpu, msr_index, data);
+       r = kvm_set_msr_with_filter(vcpu, msr_index, data);
 
        if (r && kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data,
                                    complete_emulated_msr_access, r)) {
@@ -7661,6 +7672,18 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
        return r;
 }
 
+static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
+                           u32 msr_index, u64 *pdata)
+{
+       return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
+}
+
+static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
+                           u32 msr_index, u64 data)
+{
+       return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
+}
+
 static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
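
The split leaves the emulator with two MSR paths: guest-initiated RDMSR/WRMSR emulation goes through the new *_with_filter ops, which honor userspace MSR filters and may exit to userspace, while KVM-internal emulator accesses keep using the unfiltered kvm_get_msr()/kvm_set_msr() wrappers. A minimal sketch of the guest-visible path (illustrative helper, not from the patch):

	/* Illustrative only: a guest-visible read that honors MSR filters. */
	static int demo_emulated_rdmsr(struct x86_emulate_ctxt *ctxt, u64 *val)
	{
		return ctxt->ops->get_msr_with_filter(ctxt, MSR_IA32_TSC, val);
	}
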
@@ -7724,6 +7747,11 @@ static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
        return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR);
 }
 
+static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt)
+{
+       return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID);
+}
+
 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
 {
        return kvm_register_read_raw(emul_to_vcpu(ctxt), reg);
@@ -7794,6 +7822,8 @@ static const struct x86_emulate_ops emulate_ops = {
        .set_dr              = emulator_set_dr,
        .get_smbase          = emulator_get_smbase,
        .set_smbase          = emulator_set_smbase,
+       .set_msr_with_filter = emulator_set_msr_with_filter,
+       .get_msr_with_filter = emulator_get_msr_with_filter,
        .set_msr             = emulator_set_msr,
        .get_msr             = emulator_get_msr,
        .check_pmc           = emulator_check_pmc,
@@ -7806,6 +7836,7 @@ static const struct x86_emulate_ops emulate_ops = {
        .guest_has_long_mode = emulator_guest_has_long_mode,
        .guest_has_movbe     = emulator_guest_has_movbe,
        .guest_has_fxsr      = emulator_guest_has_fxsr,
+       .guest_has_rdpid     = emulator_guest_has_rdpid,
        .set_nmi_mask        = emulator_set_nmi_mask,
        .get_hflags          = emulator_get_hflags,
        .exiting_smm         = emulator_exiting_smm,
@@ -8894,7 +8925,7 @@ int kvm_arch_init(void *opaque)
        }
        kvm_nr_uret_msrs = 0;
 
-       r = kvm_mmu_module_init();
+       r = kvm_mmu_vendor_module_init();
        if (r)
                goto out_free_percpu;
 
@@ -8942,7 +8973,7 @@ void kvm_arch_exit(void)
        cancel_work_sync(&pvclock_gtod_work);
 #endif
        kvm_x86_ops.hardware_enable = NULL;
-       kvm_mmu_module_exit();
+       kvm_mmu_vendor_module_exit();
        free_percpu(user_return_msrs);
        kmem_cache_destroy(x86_emulator_cache);
 #ifdef CONFIG_KVM_XEN
@@ -9058,15 +9089,29 @@ bool kvm_apicv_activated(struct kvm *kvm)
 }
 EXPORT_SYMBOL_GPL(kvm_apicv_activated);
 
+
+static void set_or_clear_apicv_inhibit(unsigned long *inhibits,
+                                      enum kvm_apicv_inhibit reason, bool set)
+{
+       if (set)
+               __set_bit(reason, inhibits);
+       else
+               __clear_bit(reason, inhibits);
+
+       trace_kvm_apicv_inhibit_changed(reason, set, *inhibits);
+}
+
 static void kvm_apicv_init(struct kvm *kvm)
 {
+       unsigned long *inhibits = &kvm->arch.apicv_inhibit_reasons;
+
        init_rwsem(&kvm->arch.apicv_update_lock);
 
-       set_bit(APICV_INHIBIT_REASON_ABSENT,
-               &kvm->arch.apicv_inhibit_reasons);
+       set_or_clear_apicv_inhibit(inhibits, APICV_INHIBIT_REASON_ABSENT, true);
+
        if (!enable_apicv)
-               set_bit(APICV_INHIBIT_REASON_DISABLE,
-                       &kvm->arch.apicv_inhibit_reasons);
+               set_or_clear_apicv_inhibit(inhibits,
+                                          APICV_INHIBIT_REASON_DISABLE, true);
 }
 
 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
@@ -9740,24 +9785,21 @@ out:
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
 
-void __kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
+void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
+                                     enum kvm_apicv_inhibit reason, bool set)
 {
        unsigned long old, new;
 
        lockdep_assert_held_write(&kvm->arch.apicv_update_lock);
 
-       if (!static_call(kvm_x86_check_apicv_inhibit_reasons)(bit))
+       if (!static_call(kvm_x86_check_apicv_inhibit_reasons)(reason))
                return;
 
        old = new = kvm->arch.apicv_inhibit_reasons;
 
-       if (activate)
-               __clear_bit(bit, &new);
-       else
-               __set_bit(bit, &new);
+       set_or_clear_apicv_inhibit(&new, reason, set);
 
        if (!!old != !!new) {
-               trace_kvm_apicv_update_request(activate, bit);
                /*
                 * Kick all vCPUs before setting apicv_inhibit_reasons to avoid
                 * false positives in the sanity check WARN in svm_vcpu_run().
@@ -9776,20 +9818,22 @@ void __kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
                        unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE);
                        kvm_zap_gfn_range(kvm, gfn, gfn+1);
                }
-       } else
+       } else {
                kvm->arch.apicv_inhibit_reasons = new;
+       }
 }
 
-void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
+void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
+                                   enum kvm_apicv_inhibit reason, bool set)
 {
        if (!enable_apicv)
                return;
 
        down_write(&kvm->arch.apicv_update_lock);
-       __kvm_request_apicv_update(kvm, activate, bit);
+       __kvm_set_or_clear_apicv_inhibit(kvm, reason, set);
        up_write(&kvm->arch.apicv_update_lock);
 }
-EXPORT_SYMBOL_GPL(kvm_request_apicv_update);
+EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit);
 
 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
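
With the rename from __kvm_request_apicv_update(), callers now name the inhibit reason and state explicitly whether to set or clear it. A minimal caller-side sketch (wrapper name illustrative; the enum value is one the patch itself uses):

	/* Illustrative only: toggle an APICv inhibit from vendor code. */
	static void demo_toggle_blockirq(struct kvm *kvm, bool blocked)
	{
		/* Takes apicv_update_lock internally; no-op if !enable_apicv. */
		kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ,
					       blocked);
	}
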
@@ -10937,7 +10981,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
 {
-       bool inhibit = false;
+       bool set = false;
        struct kvm_vcpu *vcpu;
        unsigned long i;
 
@@ -10945,11 +10989,11 @@ static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) {
-                       inhibit = true;
+                       set = true;
                        break;
                }
        }
-       __kvm_request_apicv_update(kvm, !inhibit, APICV_INHIBIT_REASON_BLOCKIRQ);
+       __kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, set);
        up_write(&kvm->arch.apicv_update_lock);
 }
 
@@ -11557,10 +11601,8 @@ int kvm_arch_hardware_setup(void *opaque)
                u64 max = min(0x7fffffffULL,
                              __scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz));
                kvm_max_guest_tsc_khz = max;
-
-               kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
        }
-
+       kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
        kvm_init_msr_list();
        return 0;
 }
@@ -11629,12 +11671,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        ret = kvm_page_track_init(kvm);
        if (ret)
-               return ret;
+               goto out;
+
+       ret = kvm_mmu_init_vm(kvm);
+       if (ret)
+               goto out_page_track;
 
        INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
-       INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
-       INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
-       INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
        INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
        atomic_set(&kvm->arch.noncoherent_dma_count, 0);
 
@@ -11666,10 +11709,14 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        kvm_apicv_init(kvm);
        kvm_hv_init_vm(kvm);
-       kvm_mmu_init_vm(kvm);
        kvm_xen_init_vm(kvm);
 
        return static_call(kvm_x86_vm_init)(kvm);
+
+out_page_track:
+       kvm_page_track_cleanup(kvm);
+out:
+       return ret;
 }
 
 int kvm_arch_post_init_vm(struct kvm *kvm)
@@ -12593,7 +12640,7 @@ void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_c
 {
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
        struct x86_exception fault;
-       u32 access = error_code &
+       u64 access = error_code &
                (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
 
        if (!(error_code & PFERR_PRESENT_MASK) ||
@@ -12933,9 +12980,24 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
-EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_update_request);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);
+
+static int __init kvm_x86_init(void)
+{
+       kvm_mmu_x86_module_init();
+       return 0;
+}
+module_init(kvm_x86_init);
+
+static void __exit kvm_x86_exit(void)
+{
+       /*
+        * If module_init() is implemented, module_exit() must also be
+        * implemented to allow module unload.
+        */
+}
+module_exit(kvm_x86_exit);
index 4aa0f2b3166507521e23cf224c1604afeb1eca07..bf6cc25eee7658b5c5cdf69776ca25331637608d 100644 (file)
@@ -39,8 +39,8 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
        }
 
        do {
-               ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, false, true,
-                                               gpa, PAGE_SIZE, false);
+               ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
+                                               gpa, PAGE_SIZE);
                if (ret)
                        goto out;
 
@@ -1025,8 +1025,7 @@ static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm
                        break;
 
                idx = srcu_read_lock(&kvm->srcu);
-               rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa,
-                                                 PAGE_SIZE, false);
+               rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);
                srcu_read_unlock(&kvm->srcu, idx);
        } while(!rc);
 
index 1f8a8f8951738514b85684ae2985e1be510e74dd..50734a23034c438487e060cde8304c2a5ad05af7 100644 (file)
@@ -93,7 +93,6 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
                buff += 8;
        }
        if (len & 7) {
-#ifdef CONFIG_DCACHE_WORD_ACCESS
                unsigned int shift = (8 - (len & 7)) * 8;
                unsigned long trail;
 
@@ -103,31 +102,6 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
                    "adcq $0,%[res]"
                        : [res] "+r" (temp64)
                        : [trail] "r" (trail));
-#else
-               if (len & 4) {
-                       asm("addq %[val],%[res]\n\t"
-                           "adcq $0,%[res]"
-                               : [res] "+r" (temp64)
-                               : [val] "r" ((u64)*(u32 *)buff)
-                               : "memory");
-                       buff += 4;
-               }
-               if (len & 2) {
-                       asm("addq %[val],%[res]\n\t"
-                           "adcq $0,%[res]"
-                               : [res] "+r" (temp64)
-                               : [val] "r" ((u64)*(u16 *)buff)
-                               : "memory");
-                       buff += 2;
-               }
-               if (len & 1) {
-                       asm("addq %[val],%[res]\n\t"
-                           "adcq $0,%[res]"
-                               : [res] "+r" (temp64)
-                               : [val] "r" ((u64)*(u8 *)buff)
-                               : "memory");
-               }
-#endif
        }
        result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
        if (unlikely(odd)) {
index df50451d94ef7edf095defd0253aa7bb329123ab..3e2f33fc33de24a77626e2c96702b79ec57fcd2c 100644 (file)
@@ -22,7 +22,7 @@ static __always_inline void rep_movs(void *to, const void *from, size_t n)
                     : "memory");
 }
 
-void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
+static void string_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
 {
        if (unlikely(!n))
                return;
@@ -38,9 +38,8 @@ void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
        }
        rep_movs(to, (const void *)from, n);
 }
-EXPORT_SYMBOL(memcpy_fromio);
 
-void memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
+static void string_memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
 {
        if (unlikely(!n))
                return;
@@ -56,14 +55,64 @@ void memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
        }
        rep_movs((void *)to, (const void *) from, n);
 }
+
+static void unrolled_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
+{
+       const volatile char __iomem *in = from;
+       char *out = to;
+       int i;
+
+       for (i = 0; i < n; ++i)
+               out[i] = readb(&in[i]);
+}
+
+static void unrolled_memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
+{
+       volatile char __iomem *out = to;
+       const char *in = from;
+       int i;
+
+       for (i = 0; i < n; ++i)
+               writeb(in[i], &out[i]);
+}
+
+static void unrolled_memset_io(volatile void __iomem *a, int b, size_t c)
+{
+       volatile char __iomem *mem = a;
+       int i;
+
+       for (i = 0; i < c; ++i)
+               writeb(b, &mem[i]);
+}
+
+void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
+{
+       if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO))
+               unrolled_memcpy_fromio(to, from, n);
+       else
+               string_memcpy_fromio(to, from, n);
+}
+EXPORT_SYMBOL(memcpy_fromio);
+
+void memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
+{
+       if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO))
+               unrolled_memcpy_toio(to, from, n);
+       else
+               string_memcpy_toio(to, from, n);
+}
 EXPORT_SYMBOL(memcpy_toio);
 
 void memset_io(volatile void __iomem *a, int b, size_t c)
 {
-       /*
-        * TODO: memset can mangle the IO patterns quite a bit.
-        * perhaps it would be better to use a dumb one:
-        */
-       memset((void *)a, b, c);
+       if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO)) {
+               unrolled_memset_io(a, b, c);
+       } else {
+               /*
+                * TODO: memset can mangle the IO patterns quite a bit.
+                * perhaps it would be better to use a dumb one:
+                */
+               memset((void *)a, b, c);
+       }
 }
 EXPORT_SYMBOL(memset_io);
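
Callers see no API change from the dispatch: the exported memcpy_fromio()/memcpy_toio()/memset_io() entry points simply fall back to byte-wise readb()/writeb() loops on guests that advertise CC_ATTR_GUEST_UNROLL_STRING_IO, and keep using REP MOVS otherwise. A driver-side sketch (names illustrative, not from the patch):

	/* Illustrative only: an MMIO bulk read through the new dispatch. */
	static void demo_fetch_regs(void __iomem *mmio, void *buf, size_t len)
	{
		/* Byte-wise on unrolling guests, REP MOVS everywhere else. */
		memcpy_fromio(buf, mmio, len);
	}
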
index 0402a749f3a0ee188261cd248f5c5fdb8404817a..0ae6cf804197053e2bc1ab4f8bfc6eede4f13d84 100644 (file)
@@ -119,7 +119,7 @@ void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
 
        /* cache copy and flush to align dest */
        if (!IS_ALIGNED(dest, 8)) {
-               unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);
+               size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);
 
                memcpy((void *) dest, (void *) source, len);
                clean_cache_range((void *) dest, len);
index 6eb4d91d5365563024f44d8cb92f4df599ef1a0e..d400b6d9d246b93c5a274a579997a3054a5b2e22 100644 (file)
@@ -855,13 +855,11 @@ done:
                        nr_invalidate);
 }
 
-static bool tlb_is_not_lazy(int cpu)
+static bool tlb_is_not_lazy(int cpu, void *data)
 {
        return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
 }
 
-static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
-
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
 EXPORT_PER_CPU_SYMBOL(cpu_tlbstate_shared);
 
@@ -890,36 +888,11 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
         * up on the new contents of what used to be page tables, while
         * doing a speculative memory access.
         */
-       if (info->freed_tables) {
+       if (info->freed_tables)
                on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
-       } else {
-               /*
-                * Although we could have used on_each_cpu_cond_mask(),
-                * open-coding it has performance advantages, as it eliminates
-                * the need for indirect calls or retpolines. In addition, it
-                * allows to use a designated cpumask for evaluating the
-                * condition, instead of allocating one.
-                *
-                * This code works under the assumption that there are no nested
-                * TLB flushes, an assumption that is already made in
-                * flush_tlb_mm_range().
-                *
-                * cond_cpumask is logically a stack-local variable, but it is
-                * more efficient to have it off the stack and not to allocate
-                * it on demand. Preemption is disabled and this code is
-                * non-reentrant.
-                */
-               struct cpumask *cond_cpumask = this_cpu_ptr(&flush_tlb_mask);
-               int cpu;
-
-               cpumask_clear(cond_cpumask);
-
-               for_each_cpu(cpu, cpumask) {
-                       if (tlb_is_not_lazy(cpu))
-                               __cpumask_set_cpu(cpu, cond_cpumask);
-               }
-               on_each_cpu_mask(cond_cpumask, flush_tlb_func, (void *)info, true);
-       }
+       else
+               on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func,
+                               (void *)info, 1, cpumask);
 }
 
 void flush_tlb_multi(const struct cpumask *cpumask,
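
The removed open-coded mask construction now lives behind on_each_cpu_cond_mask(), which evaluates a per-CPU predicate and sends the IPI only where it returns true. Its shape as used above, annotated (a sketch of the call, not new code):

	on_each_cpu_cond_mask(tlb_is_not_lazy,	/* bool (*)(int cpu, void *info) */
			      flush_tlb_func,	/* IPI function to run           */
			      (void *)info,	/* argument for both callbacks   */
			      true,		/* wait for completion           */
			      cpumask);		/* candidate CPUs                */
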
index 8fe35ed11fd665f1d80b110a516219cdfc4de579..16b6efacf7c6770706f9ef3846a85f6c2af7b2af 100644 (file)
@@ -412,6 +412,7 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
                EMIT_LFENCE();
                EMIT2(0xFF, 0xE0 + reg);
        } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
+               OPTIMIZER_HIDE_VAR(reg);
                emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
        } else
 #endif
index 9f2b251e83c566cad8117fc5316d2c3e2ac9378a..3822666fb73d521a6b96c78b99d59f7fe72b4147 100644 (file)
@@ -40,7 +40,8 @@ static void msr_save_context(struct saved_context *ctxt)
        struct saved_msr *end = msr + ctxt->saved_msrs.num;
 
        while (msr < end) {
-               msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
+               if (msr->valid)
+                       rdmsrl(msr->info.msr_no, msr->info.reg.q);
                msr++;
        }
 }
@@ -424,8 +425,10 @@ static int msr_build_context(const u32 *msr_id, const int num)
        }
 
        for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
+               u64 dummy;
+
                msr_array[i].info.msr_no        = msr_id[j];
-               msr_array[i].valid              = false;
+               msr_array[i].valid              = !rdmsrl_safe(msr_id[j], &dummy);
                msr_array[i].info.reg.q         = 0;
        }
        saved_msrs->num   = total_num;
@@ -500,10 +503,24 @@ static int pm_cpu_check(const struct x86_cpu_id *c)
        return ret;
 }
 
+static void pm_save_spec_msr(void)
+{
+       u32 spec_msr_id[] = {
+               MSR_IA32_SPEC_CTRL,
+               MSR_IA32_TSX_CTRL,
+               MSR_TSX_FORCE_ABORT,
+               MSR_IA32_MCU_OPT_CTRL,
+               MSR_AMD64_LS_CFG,
+       };
+
+       msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
+}
+
 static int pm_check_save_msr(void)
 {
        dmi_check_system(msr_save_dmi_table);
        pm_cpu_check(msr_save_cpu_table);
+       pm_save_spec_msr();
 
        return 0;
 }
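
Each MSR's validity is now probed once when the context is built, using the non-faulting accessor, so the suspend-time save path can use plain rdmsrl() on MSRs known to exist. The probe pattern in isolation (a sketch; the helper name and msr_no are illustrative):

	/* Illustrative only: rdmsrl_safe() returns 0 when RDMSR did not fault. */
	static bool demo_msr_exists(u32 msr_no)
	{
		u64 dummy;

		return !rdmsrl_safe(msr_no, &dummy);
	}
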
index ead7e5b3a97573defc132f412bbacc5090dcb1ee..1bcd42c5303940c482002c6ddcf75fa6d8270915 100644 (file)
@@ -9,6 +9,7 @@ endmenu
 config UML_X86
        def_bool y
        select ARCH_BINFMT_ELF_EXTRA_PHDRS if X86_32
+       select DCACHE_WORD_ACCESS
 
 config 64BIT
        bool "64-bit kernel" if "$(SUBARCH)" = "x86"
index 48d6cd12f8a5e62077b440684fcae086012ec164..b6b997225841c661991c81d62a06869d71d08b13 100644 (file)
 #include <linux/msg.h>
 #include <linux/shm.h>
 
-typedef long syscall_handler_t(void);
+typedef long syscall_handler_t(long, long, long, long, long, long);
 
 extern syscall_handler_t *sys_call_table[];
 
 #define EXECUTE_SYSCALL(syscall, regs) \
-       (((long (*)(long, long, long, long, long, long)) \
-         (*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
+       (((*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
                                      UPT_SYSCALL_ARG2(&regs->regs), \
                                      UPT_SYSCALL_ARG3(&regs->regs), \
                                      UPT_SYSCALL_ARG4(&regs->regs), \
index fe5323f0c42da6a2a81e881d6047357c5ef46a79..27b29ae6c471b893dc2d92d7562eaa188cd5b9c2 100644 (file)
@@ -12,7 +12,6 @@
 #include <asm/prctl.h> /* XXX This should get the constants from libc */
 #include <registers.h>
 #include <os.h>
-#include <registers.h>
 
 long arch_prctl(struct task_struct *task, int option,
                unsigned long __user *arg2)
index 45cc0ae0af6f966ed778253b9624d46a783c2152..c7b9f12896f20a6870588cd50e9849e11c992dcf 100644 (file)
@@ -29,7 +29,7 @@
        .if XTENSA_HAVE_COPROCESSOR(x);                                 \
                .align 4;                                               \
        .Lsave_cp_regs_cp##x:                                           \
-               xchal_cp##x##_store a2 a4 a5 a6 a7;                     \
+               xchal_cp##x##_store a2 a3 a4 a5 a6;                     \
                jx      a0;                                             \
        .endif
 
@@ -46,7 +46,7 @@
        .if XTENSA_HAVE_COPROCESSOR(x);                                 \
                .align 4;                                               \
        .Lload_cp_regs_cp##x:                                           \
-               xchal_cp##x##_load a2 a4 a5 a6 a7;                      \
+               xchal_cp##x##_load a2 a3 a4 a5 a6;                      \
                jx      a0;                                             \
        .endif
 
index 0dde21e0d3de4c2836bbce5c7fee361811863ec8..ad1841cecdfb769a32c7ed61fa62d027cde4bbcf 100644 (file)
@@ -40,7 +40,7 @@ static int patch_text_stop_machine(void *data)
 {
        struct patch *patch = data;
 
-       if (atomic_inc_return(&patch->cpu_count) == 1) {
+       if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
                local_patch_text(patch->addr, patch->data, patch->sz);
                atomic_inc(&patch->cpu_count);
        } else {
index 6713c65a25e15cfe96824cbcdba6bb8f222ef274..b265e4bc16c2e0bc52c63819fda704ebda00de31 100644 (file)
@@ -2,8 +2,7 @@
 kapi := arch/$(SRCARCH)/include/generated/asm
 uapi := arch/$(SRCARCH)/include/generated/uapi/asm
 
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')      \
-         $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
 
 syscall := $(src)/syscall.tbl
 syshdr := $(srctree)/scripts/syscallhdr.sh
index 81d7c7e8f7e96077e2a26f69d173af19f28a5f18..10b79d3c74e070ed1afdace04ef805e59e4155bd 100644 (file)
@@ -36,24 +36,19 @@ static void rs_poll(struct timer_list *);
 static struct tty_driver *serial_driver;
 static struct tty_port serial_port;
 static DEFINE_TIMER(serial_timer, rs_poll);
-static DEFINE_SPINLOCK(timer_lock);
 
 static int rs_open(struct tty_struct *tty, struct file * filp)
 {
-       spin_lock_bh(&timer_lock);
        if (tty->count == 1)
                mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
-       spin_unlock_bh(&timer_lock);
 
        return 0;
 }
 
 static void rs_close(struct tty_struct *tty, struct file * filp)
 {
-       spin_lock_bh(&timer_lock);
        if (tty->count == 1)
                del_timer_sync(&serial_timer);
-       spin_unlock_bh(&timer_lock);
 }
 
 
@@ -73,8 +68,6 @@ static void rs_poll(struct timer_list *unused)
        int rd = 1;
        unsigned char c;
 
-       spin_lock(&timer_lock);
-
        while (simc_poll(0)) {
                rd = simc_read(0, &c, 1);
                if (rd <= 0)
@@ -87,7 +80,6 @@ static void rs_poll(struct timer_list *unused)
                tty_flip_buffer_push(port);
        if (rd)
                mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
-       spin_unlock(&timer_lock);
 }
 
 
index cdd7b2915c532c1590abafe0019594402b98cbc3..4259125e16ab24075d902f989f424f8b046c9bee 100644 (file)
@@ -1598,7 +1598,7 @@ EXPORT_SYMBOL(bio_split);
 void bio_trim(struct bio *bio, sector_t offset, sector_t size)
 {
        if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
-                        offset + size > bio->bi_iter.bi_size))
+                        offset + size > bio_sectors(bio)))
                return;
 
        size <<= 9;
index 0430926426fe3401d0493e193f9c5587c3634add..8dfe62786cd5fa5eae77afed283fcff238070835 100644 (file)
@@ -65,19 +65,12 @@ static bool blkcg_policy_enabled(struct request_queue *q,
        return pol && test_bit(pol->plid, q->blkcg_pols);
 }
 
-/**
- * blkg_free - free a blkg
- * @blkg: blkg to free
- *
- * Free @blkg which may be partially allocated.
- */
-static void blkg_free(struct blkcg_gq *blkg)
+static void blkg_free_workfn(struct work_struct *work)
 {
+       struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
+                                            free_work);
        int i;
 
-       if (!blkg)
-               return;
-
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (blkg->pd[i])
                        blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
@@ -89,6 +82,25 @@ static void blkg_free(struct blkcg_gq *blkg)
        kfree(blkg);
 }
 
+/**
+ * blkg_free - free a blkg
+ * @blkg: blkg to free
+ *
+ * Free @blkg which may be partially allocated.
+ */
+static void blkg_free(struct blkcg_gq *blkg)
+{
+       if (!blkg)
+               return;
+
+       /*
+        * Both ->pd_free_fn() and the request queue's release handler may
+        * sleep, so defer the free to a work item.
+        */
+       INIT_WORK(&blkg->free_work, blkg_free_workfn);
+       schedule_work(&blkg->free_work);
+}
+
 static void __blkg_release(struct rcu_head *rcu)
 {
        struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
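
The defer-to-workqueue pattern above, reduced to its essentials (all names hypothetical): an object that must be freed from a context that cannot sleep is handed to a work item, whose handler runs in process context where sleeping free functions are legal.

	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct demo_obj {
		struct work_struct free_work;
	};

	static void demo_free_workfn(struct work_struct *work)
	{
		struct demo_obj *obj =
			container_of(work, struct demo_obj, free_work);

		kfree(obj);	/* stands in for pd_free_fn(), which may sleep */
	}

	static void demo_free(struct demo_obj *obj)
	{
		INIT_WORK(&obj->free_work, demo_free_workfn);
		schedule_work(&obj->free_work);
	}
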
index 11f49f78db32bcd8a5525bda5d09e8b414e57d04..df9cfe4ca5328e48d7590ec74935712fada4ed02 100644 (file)
@@ -280,7 +280,6 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
 
                task_lock(task);
                if (task->flags & PF_EXITING) {
-                       err = -ESRCH;
                        kmem_cache_free(iocontext_cachep, ioc);
                        goto out;
                }
@@ -292,7 +291,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
        task->io_context->ioprio = ioprio;
 out:
        task_unlock(task);
-       return err;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(set_task_ioprio);
 
index e6f24fa4a4c227f699aac582f1900e7dc7c5a22f..c4370d2761706d102d8c211119f96dcabe90f336 100644 (file)
@@ -794,7 +794,8 @@ bool blk_update_request(struct request *req, blk_status_t error,
 #endif
 
        if (unlikely(error && !blk_rq_is_passthrough(req) &&
-                    !(req->rq_flags & RQF_QUIET))) {
+                    !(req->rq_flags & RQF_QUIET)) &&
+                    !test_bit(GD_DEAD, &req->q->disk->state)) {
                blk_print_req_error(req, error);
                trace_block_rq_error(req, error, nr_bytes);
        }
@@ -4462,21 +4463,28 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
        return true;
 }
 
-static void blk_mq_elv_switch_back(struct list_head *head,
-               struct request_queue *q)
+static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
+                                               struct request_queue *q)
 {
        struct blk_mq_qe_pair *qe;
-       struct elevator_type *t = NULL;
 
        list_for_each_entry(qe, head, node)
-               if (qe->q == q) {
-                       t = qe->type;
-                       break;
-               }
+               if (qe->q == q)
+                       return qe;
 
-       if (!t)
-               return;
+       return NULL;
+}
 
+static void blk_mq_elv_switch_back(struct list_head *head,
+                                 struct request_queue *q)
+{
+       struct blk_mq_qe_pair *qe;
+       struct elevator_type *t;
+
+       qe = blk_lookup_qe_pair(head, q);
+       if (!qe)
+               return;
+       t = qe->type;
        list_del(&qe->node);
        kfree(qe);
 
index 2eb01becde8c412c11a7ffedd8a04bd8e41521ac..7e44eccc676dd270da538fa116a4a350984b64b4 100644 (file)
@@ -101,9 +101,6 @@ u64 wbt_default_latency_nsec(struct request_queue *);
 
 #else
 
-static inline void wbt_track(struct request *rq, enum wbt_flags flags)
-{
-}
 static inline int wbt_init(struct request_queue *q)
 {
        return -EINVAL;
index c9a4fc90d3e9018a4eaf691ad7cbe8433461b7ab..b8b6759d670f01dc3584b8c4fda150a0c03579f1 100644 (file)
@@ -335,7 +335,7 @@ int blk_alloc_ext_minor(void)
 {
        int idx;
 
-       idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL);
+       idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT - 1, GFP_KERNEL);
        if (idx == -ENOSPC)
                return -EBUSY;
        return idx;
index 4a86340133e46b2465ed0de12fbf81b836ee81c3..f8703db99c734a7d6724e08113d6a09f43837b9f 100644 (file)
@@ -629,7 +629,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                return compat_put_long(argp,
                        (bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
        case BLKGETSIZE:
-               if (bdev_nr_sectors(bdev) > ~0UL)
+               if (bdev_nr_sectors(bdev) > ~(compat_ulong_t)0)
                        return -EFBIG;
                return compat_put_ulong(argp, bdev_nr_sectors(bdev));
 
index 3ea7fe60823f5b1c85a1db07bf3cd80c3ffb463e..d8443cfb1c401ae51bec3a34b681ea4090149c5f 100644 (file)
@@ -13,34 +13,20 @@ obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_nohashes.o
 endif
 
 quiet_cmd_extract_certs  = CERT    $@
-      cmd_extract_certs  = $(obj)/extract-cert $(2) $@
+      cmd_extract_certs  = $(obj)/extract-cert $(extract-cert-in) $@
+extract-cert-in = $(or $(filter-out $(obj)/extract-cert, $(real-prereqs)),"")
 
 $(obj)/system_certificates.o: $(obj)/x509_certificate_list
 
 $(obj)/x509_certificate_list: $(CONFIG_SYSTEM_TRUSTED_KEYS) $(obj)/extract-cert FORCE
-       $(call if_changed,extract_certs,$(if $(CONFIG_SYSTEM_TRUSTED_KEYS),$<,""))
+       $(call if_changed,extract_certs)
 
 targets += x509_certificate_list
 
-ifeq ($(CONFIG_MODULE_SIG),y)
-       SIGN_KEY = y
-endif
-
-ifeq ($(CONFIG_IMA_APPRAISE_MODSIG),y)
-ifeq ($(CONFIG_MODULES),y)
-       SIGN_KEY = y
-endif
-endif
-
-ifdef SIGN_KEY
-###############################################################################
-#
 # If module signing is requested, say by allyesconfig, but a key has not been
 # supplied, then one will need to be generated to make sure the build does not
 # fail and that the kernel may be used afterwards.
 #
-###############################################################################
-
 # We do it this way rather than having a boolean option for enabling an
 # external private key, because 'make randconfig' might enable such a
 # boolean option and we unfortunately can't make it depend on !RANDCONFIG.
@@ -67,23 +53,22 @@ $(obj)/x509.genkey:
 
 endif # CONFIG_MODULE_SIG_KEY
 
-# If CONFIG_MODULE_SIG_KEY isn't a PKCS#11 URI, depend on it
-ifneq ($(filter-out pkcs11:%, $(CONFIG_MODULE_SIG_KEY)),)
-X509_DEP := $(CONFIG_MODULE_SIG_KEY)
-endif
-
 $(obj)/system_certificates.o: $(obj)/signing_key.x509
 
-$(obj)/signing_key.x509: $(X509_DEP) $(obj)/extract-cert FORCE
-       $(call if_changed,extract_certs,$(if $(CONFIG_MODULE_SIG_KEY),$(if $(X509_DEP),$<,$(CONFIG_MODULE_SIG_KEY)),""))
-endif # CONFIG_MODULE_SIG
+PKCS11_URI := $(filter pkcs11:%, $(CONFIG_MODULE_SIG_KEY))
+ifdef PKCS11_URI
+$(obj)/signing_key.x509: extract-cert-in := $(PKCS11_URI)
+endif
+
+$(obj)/signing_key.x509: $(filter-out $(PKCS11_URI),$(CONFIG_MODULE_SIG_KEY)) $(obj)/extract-cert FORCE
+       $(call if_changed,extract_certs)
 
 targets += signing_key.x509
 
 $(obj)/revocation_certificates.o: $(obj)/x509_revocation_list
 
 $(obj)/x509_revocation_list: $(CONFIG_SYSTEM_REVOCATION_KEYS) $(obj)/extract-cert FORCE
-       $(call if_changed,extract_certs,$(if $(CONFIG_SYSTEM_REVOCATION_KEYS),$<,""))
+       $(call if_changed,extract_certs)
 
 targets += x509_revocation_list
 
index e1645e6f4d97467d87413c506ec7aeda33ef38a1..003e25d4a17e297838ea0c9c8391b81af6449c98 100644 (file)
@@ -9,10 +9,7 @@
 system_certificate_list:
 __cert_list_start:
 __module_cert_start:
-#if defined(CONFIG_MODULE_SIG) || (defined(CONFIG_IMA_APPRAISE_MODSIG) \
-                              && defined(CONFIG_MODULES))
        .incbin "certs/signing_key.x509"
-#endif
 __module_cert_end:
        .incbin "certs/x509_certificate_list"
 __cert_list_end:
index a5fe2926bf506466a5d4fa6b9dbcace3e38ca4b5..0555f68c2dfd68c2ddc4609fc56b9d4dd738de94 100644 (file)
@@ -353,29 +353,27 @@ static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
 static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi,
                               struct acpi_ipmi_msg *msg)
 {
-       struct acpi_ipmi_msg *tx_msg, *temp;
-       bool msg_found = false;
+       struct acpi_ipmi_msg *tx_msg = NULL, *iter, *temp;
        unsigned long flags;
 
        spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
-       list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
-               if (msg == tx_msg) {
-                       msg_found = true;
-                       list_del(&tx_msg->head);
+       list_for_each_entry_safe(iter, temp, &ipmi->tx_msg_list, head) {
+               if (msg == iter) {
+                       tx_msg = iter;
+                       list_del(&iter->head);
                        break;
                }
        }
        spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
 
-       if (msg_found)
+       if (tx_msg)
                acpi_ipmi_msg_put(tx_msg);
 }
 
 static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
 {
        struct acpi_ipmi_device *ipmi_device = user_msg_data;
-       bool msg_found = false;
-       struct acpi_ipmi_msg *tx_msg, *temp;
+       struct acpi_ipmi_msg *tx_msg = NULL, *iter, *temp;
        struct device *dev = ipmi_device->dev;
        unsigned long flags;
 
@@ -387,16 +385,16 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
        }
 
        spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
-       list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) {
-               if (msg->msgid == tx_msg->tx_msgid) {
-                       msg_found = true;
-                       list_del(&tx_msg->head);
+       list_for_each_entry_safe(iter, temp, &ipmi_device->tx_msg_list, head) {
+               if (msg->msgid == iter->tx_msgid) {
+                       tx_msg = iter;
+                       list_del(&iter->head);
                        break;
                }
        }
        spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
 
-       if (!msg_found) {
+       if (!tx_msg) {
                dev_warn(dev,
                         "Unexpected response (msg id %ld) is returned.\n",
                         msg->msgid);
@@ -482,15 +480,14 @@ err_ref:
 
 static void ipmi_bmc_gone(int iface)
 {
-       struct acpi_ipmi_device *ipmi_device, *temp;
-       bool dev_found = false;
+       struct acpi_ipmi_device *ipmi_device = NULL, *iter, *temp;
 
        mutex_lock(&driver_data.ipmi_lock);
-       list_for_each_entry_safe(ipmi_device, temp,
+       list_for_each_entry_safe(iter, temp,
                                 &driver_data.ipmi_devices, head) {
-               if (ipmi_device->ipmi_ifnum != iface) {
-                       dev_found = true;
-                       __ipmi_dev_kill(ipmi_device);
+               if (iter->ipmi_ifnum != iface) {
+                       ipmi_device = iter;
+                       __ipmi_dev_kill(iter);
                        break;
                }
        }
@@ -500,7 +497,7 @@ static void ipmi_bmc_gone(int iface)
                                        struct acpi_ipmi_device, head);
        mutex_unlock(&driver_data.ipmi_lock);
 
-       if (dev_found) {
+       if (ipmi_device) {
                ipmi_flush_tx_msg(ipmi_device);
                acpi_ipmi_dev_put(ipmi_device);
        }
index c7fdb12c33105adf0b33481b84493ac078d9eddd..33b7fbbeda82ee60f9f573d3f825f637b07802d2 100644 (file)
@@ -319,7 +319,7 @@ repeat:
        if (res_ins)
                list_add(&res_ins->list, res_list);
        else {
-               res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
+               res_ins = kmalloc(sizeof(*res_ins), GFP_KERNEL);
                if (!res_ins)
                        return -ENOMEM;
                res_ins->start = start;
index d418449194eeebac7b8f38474e504771a3604a43..bc1454789a065906e6d914cbc2fe10991f79798d 100644 (file)
@@ -654,7 +654,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
        unsigned int num_ent, i, cpc_rev;
        int pcc_subspace_id = -1;
        acpi_status status;
-       int ret = -EFAULT;
+       int ret = -ENODATA;
 
        if (osc_sb_cppc_not_supported)
                return -ENODEV;
@@ -679,9 +679,14 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
        cpc_obj = &out_obj->package.elements[0];
        if (cpc_obj->type == ACPI_TYPE_INTEGER) {
                num_ent = cpc_obj->integer.value;
+               if (num_ent <= 1) {
+                       pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
+                                num_ent, pr->id);
+                       goto out_free;
+               }
        } else {
-               pr_debug("Unexpected entry type(%d) for NumEntries\n",
-                               cpc_obj->type);
+               pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n",
+                        cpc_obj->type, pr->id);
                goto out_free;
        }
        cpc_ptr->num_entries = num_ent;
@@ -691,8 +696,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
        if (cpc_obj->type == ACPI_TYPE_INTEGER) {
                cpc_rev = cpc_obj->integer.value;
        } else {
-               pr_debug("Unexpected entry type(%d) for Revision\n",
-                               cpc_obj->type);
+               pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n",
+                        cpc_obj->type, pr->id);
                goto out_free;
        }
        cpc_ptr->version = cpc_rev;
@@ -723,7 +728,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
                                        if (pcc_data_alloc(pcc_subspace_id))
                                                goto out_free;
                                } else if (pcc_subspace_id != gas_t->access_width) {
-                                       pr_debug("Mismatched PCC ids.\n");
+                                       pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n",
+                                                pr->id);
                                        goto out_free;
                                }
                        } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
@@ -742,20 +748,21 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
                                         * SystemIO doesn't implement 64-bit
                                         * registers.
                                         */
-                                       pr_debug("Invalid access width %d for SystemIO register\n",
-                                               gas_t->access_width);
+                                       pr_debug("Invalid access width %d for SystemIO register in _CPC\n",
+                                                gas_t->access_width);
                                        goto out_free;
                                }
                                if (gas_t->address & OVER_16BTS_MASK) {
                                        /* SystemIO registers use 16-bit integer addresses */
-                                       pr_debug("Invalid IO port %llu for SystemIO register\n",
-                                               gas_t->address);
+                                       pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n",
+                                                gas_t->address);
                                        goto out_free;
                                }
                        } else {
                                if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
                                        /* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
-                                       pr_debug("Unsupported register type: %d\n", gas_t->space_id);
+                                       pr_debug("Unsupported register type (%d) in _CPC\n",
+                                                gas_t->space_id);
                                        goto out_free;
                                }
                        }
@@ -763,7 +770,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
                        cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
                        memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
                } else {
-                       pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
+                       pr_debug("Invalid entry type (%d) in _CPC for CPU:%d\n",
+                                i, pr->id);
                        goto out_free;
                }
        }
index 32b20efff5f83c9fab837057872002a20bed841a..4556c86c34659e55a6ca82c8a769c37f78ec79c1 100644 (file)
@@ -570,8 +570,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 {
        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 
-       if (cx->type == ACPI_STATE_C3)
-               ACPI_FLUSH_CPU_CACHE();
+       ACPI_FLUSH_CPU_CACHE();
 
        while (1) {
 
index 9efbfe087de761264f8f0fc8743f9182a77d5cc6..762b61f67e6c6db32ff1edcad58bec823e0aa4ff 100644 (file)
@@ -588,19 +588,6 @@ static struct acpi_device *handle_to_device(acpi_handle handle,
        return adev;
 }
 
-int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
-{
-       if (!device)
-               return -EINVAL;
-
-       *device = handle_to_device(handle, NULL);
-       if (!*device)
-               return -ENODEV;
-
-       return 0;
-}
-EXPORT_SYMBOL(acpi_bus_get_device);
-
 /**
  * acpi_fetch_acpi_dev - Retrieve ACPI device object.
  * @handle: ACPI handle associated with the requested ACPI device object.
index ceee808f7f2a27520e84896e5fdbcb88927501a4..47ec11d4c68edcfed9428be3970bc3affa737067 100644 (file)
@@ -151,8 +151,8 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_local_apic_override *p =
                            (struct acpi_madt_local_apic_override *)header;
-                       pr_info("LAPIC_ADDR_OVR (address[%p])\n",
-                               (void *)(unsigned long)p->address);
+                       pr_info("LAPIC_ADDR_OVR (address[0x%llx])\n",
+                               p->address);
                }
                break;
 
index e5641e6c52ee27dab1b8ffa3a89a2f1afe4f6713..bb45a9c00514475b994813155045880c6ac219af 100644 (file)
@@ -115,14 +115,16 @@ config SATA_AHCI
 
          If unsure, say N.
 
-config SATA_LPM_POLICY
+config SATA_MOBILE_LPM_POLICY
        int "Default SATA Link Power Management policy for low power chipsets"
        range 0 4
        default 0
        depends on SATA_AHCI
        help
          Select the Default SATA Link Power Management (LPM) policy to use
-         for chipsets / "South Bridges" designated as supporting low power.
+         for chipsets / "South Bridges" supporting low-power modes. Such
+         chipsets are typically found on most laptops, but desktops and
+         servers now also widely use chipsets supporting low-power modes.
 
          The value set has the following meanings:
                0 => Keep firmware settings
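
For example, a laptop-oriented configuration might pick a medium-power default (the remaining value meanings are truncated above; that 3 maps to med_power_with_dipm is an assumption about the full help text):

	CONFIG_SATA_MOBILE_LPM_POLICY=3
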
index 84456c05e84525c6b301689a7cd39c0403dda0c7..397dfd27c90d4fc6a252532770d8ebb04a487071 100644 (file)
@@ -1595,7 +1595,7 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
 static void ahci_update_initial_lpm_policy(struct ata_port *ap,
                                           struct ahci_host_priv *hpriv)
 {
-       int policy = CONFIG_SATA_LPM_POLICY;
+       int policy = CONFIG_SATA_MOBILE_LPM_POLICY;
 
 
        /* Ignore processing for chipsets that don't use policy */
index 6ead58c1b6e5293a805b041bdf56df0be42a7f5a..ad11a4c52fbeb9120010b872799c97c0fceb26e3 100644 (file)
@@ -236,7 +236,7 @@ enum {
        AHCI_HFLAG_NO_WRITE_TO_RO       = (1 << 24), /* don't write to read
                                                        only registers */
        AHCI_HFLAG_USE_LPM_POLICY       = (1 << 25), /* chipset that should use
-                                                       SATA_LPM_POLICY
+                                                       SATA_MOBILE_LPM_POLICY
                                                        as default lpm_policy */
        AHCI_HFLAG_SUSPEND_PHYS         = (1 << 26), /* handle PHYs during
                                                        suspend/resume */
index cceedde5112690f1cbaa873418d3463078f0ae08..ca64837641be2caa7b1df0a4e96662c21b388abb 100644 (file)
@@ -4014,6 +4014,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Crucial_CT*MX100*",          "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Samsung SSD 840 EVO*",       NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_NO_DMA_LOG |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Samsung SSD 840*",           NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Samsung SSD 850*",           NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
index b3be7a8f5bea6079e5e5d80e0a55b32494483c7f..b1666adc1c3a39160fa4b4f133fc8719990999dc 100644 (file)
@@ -1634,7 +1634,7 @@ EXPORT_SYMBOL_GPL(ata_sff_interrupt);
 
 void ata_sff_lost_interrupt(struct ata_port *ap)
 {
-       u8 status;
+       u8 status = 0;
        struct ata_queued_cmd *qc;
 
        /* Only one outstanding command per SFF channel */
index bec33d781ae046f989742c0b381b06ca6bd92beb..e3263e961045ac342636156d40986a11a85ebe33 100644 (file)
@@ -137,7 +137,11 @@ struct sata_dwc_device {
 #endif
 };
 
-#define SATA_DWC_QCMD_MAX      32
+/*
+ * Allow one extra special slot for commands and DMA management
+ * to account for libata internal commands.
+ */
+#define SATA_DWC_QCMD_MAX      (ATA_MAX_QUEUE + 1)
 
 struct sata_dwc_device_port {
        struct sata_dwc_device  *hsdev;
index 2578b2d454397a20ce2248a759993b0655b17f64..e465108d9998a1563a646ddb3fef6309a98337b4 100644 (file)
@@ -1,10 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- *  console driver for LCD2S 4x20 character displays connected through i2c.
- *  The display also has a spi interface, but the driver does not support
+ *  Console driver for LCD2S 4x20 character displays connected through i2c.
+ *  The display also has a SPI interface, but the driver does not support
  *  this yet.
  *
- *  This is a driver allowing you to use a LCD2S 4x20 from modtronix
+ *  This is a driver allowing you to use an LCD2S 4x20 from Modtronix
  *  engineering as auxdisplay character device.
  *
  *  (C) 2019 by Lemonage Software GmbH
@@ -12,7 +12,9 @@
  *  All rights reserved.
  */
 #include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
+#include <linux/property.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
@@ -104,7 +106,7 @@ static int lcd2s_print(struct charlcd *lcd, int c)
 static int lcd2s_gotoxy(struct charlcd *lcd, unsigned int x, unsigned int y)
 {
        struct lcd2s_data *lcd2s = lcd->drvdata;
-       u8 buf[] = { LCD2S_CMD_CUR_POS, y + 1, x + 1};
+       u8 buf[3] = { LCD2S_CMD_CUR_POS, y + 1, x + 1 };
 
        lcd2s_i2c_master_send(lcd2s->i2c, buf, sizeof(buf));
 
@@ -214,16 +216,15 @@ static int lcd2s_lines(struct charlcd *lcd, enum charlcd_lines lines)
        return 0;
 }
 
+/*
+ * Generator: LGcxxxxx...xx; must have <c> between '0' and '7',
+ * representing the numerical ASCII code of the redefined character,
+ * and <xx...xx> a sequence of 16 hex digits representing 8 bytes
+ * for each character. Most LCDs will only use 5 lower bits of
+ * the 7 first bytes.
+ */
 static int lcd2s_redefine_char(struct charlcd *lcd, char *esc)
 {
-       /* Generator : LGcxxxxx...xx; must have <c> between '0'
-        * and '7', representing the numerical ASCII code of the
-        * redefined character, and <xx...xx> a sequence of 16
-        * hex digits representing 8 bytes for each character.
-        * Most LCDs will only use 5 lower bits of the 7 first
-        * bytes.
-        */
-
        struct lcd2s_data *lcd2s = lcd->drvdata;
        u8 buf[LCD2S_CHARACTER_SIZE + 2] = { LCD2S_CMD_DEF_CUSTOM_CHAR };
        u8 value;
@@ -286,8 +287,7 @@ static const struct charlcd_ops lcd2s_ops = {
        .redefine_char  = lcd2s_redefine_char,
 };
 
-static int lcd2s_i2c_probe(struct i2c_client *i2c,
-                               const struct i2c_device_id *id)
+static int lcd2s_i2c_probe(struct i2c_client *i2c)
 {
        struct charlcd *lcd;
        struct lcd2s_data *lcd2s;
@@ -355,43 +355,22 @@ static const struct i2c_device_id lcd2s_i2c_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, lcd2s_i2c_id);
 
-#ifdef CONFIG_OF
 static const struct of_device_id lcd2s_of_table[] = {
        { .compatible = "modtronix,lcd2s" },
        { }
 };
 MODULE_DEVICE_TABLE(of, lcd2s_of_table);
-#endif
 
 static struct i2c_driver lcd2s_i2c_driver = {
        .driver = {
                .name = "lcd2s",
-#ifdef CONFIG_OF
-               .of_match_table = of_match_ptr(lcd2s_of_table),
-#endif
+               .of_match_table = lcd2s_of_table,
        },
-       .probe = lcd2s_i2c_probe,
+       .probe_new = lcd2s_i2c_probe,
        .remove = lcd2s_i2c_remove,
        .id_table = lcd2s_i2c_id,
 };
-
-static int __init lcd2s_modinit(void)
-{
-       int ret = 0;
-
-       ret = i2c_add_driver(&lcd2s_i2c_driver);
-       if (ret != 0)
-               pr_err("Failed to register lcd2s driver\n");
-
-       return ret;
-}
-module_init(lcd2s_modinit)
-
-static void __exit lcd2s_exit(void)
-{
-       i2c_del_driver(&lcd2s_i2c_driver);
-}
-module_exit(lcd2s_exit)
+module_i2c_driver(lcd2s_i2c_driver);
 
 MODULE_DESCRIPTION("LCD2S character display driver");
 MODULE_AUTHOR("Lars Poeschel");
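
Per the relocated comment, a generator payload is "LG", one digit <c> from '0' to '7', then 16 hex digits. An assumed example (escape framing is handled by the charlcd core):

	LG000010E0E0E040000	/* redefine char 0 with bytes 00 01 0E 0E 0E 04 00 00 */
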
index af6bea56f4e25a9088499bef419ba0cdb87dbead..3fc3b5940bb31983d1fc0b1f65dede25c5b2c090 100644 (file)
@@ -296,6 +296,7 @@ int driver_deferred_probe_check_state(struct device *dev)
 
        return -EPROBE_DEFER;
 }
+EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);
 
 static void deferred_probe_timeout_work_func(struct work_struct *work)
 {
index 4b55e864a0a348d2252a2fb2fffd57e5240d4bc3..4d3efaa20b7bffca3c9cae661a417d14bf2458f3 100644 (file)
@@ -1638,22 +1638,22 @@ struct sib_info {
 };
 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
 
-extern void notify_resource_state(struct sk_buff *,
+extern int notify_resource_state(struct sk_buff *,
                                  unsigned int,
                                  struct drbd_resource *,
                                  struct resource_info *,
                                  enum drbd_notification_type);
-extern void notify_device_state(struct sk_buff *,
+extern int notify_device_state(struct sk_buff *,
                                unsigned int,
                                struct drbd_device *,
                                struct device_info *,
                                enum drbd_notification_type);
-extern void notify_connection_state(struct sk_buff *,
+extern int notify_connection_state(struct sk_buff *,
                                    unsigned int,
                                    struct drbd_connection *,
                                    struct connection_info *,
                                    enum drbd_notification_type);
-extern void notify_peer_device_state(struct sk_buff *,
+extern int notify_peer_device_state(struct sk_buff *,
                                     unsigned int,
                                     struct drbd_peer_device *,
                                     struct peer_device_info *,
index 96881d5babd9e1bb356db1890fb9af1f55e2cb8f..4b0b25cc916eed144be0c712034a39ec69304ed3 100644 (file)
@@ -171,7 +171,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
                unsigned int set_size)
 {
        struct drbd_request *r;
-       struct drbd_request *req = NULL;
+       struct drbd_request *req = NULL, *tmp = NULL;
        int expect_epoch = 0;
        int expect_size = 0;
 
@@ -225,8 +225,11 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
         * to catch requests being barrier-acked "unexpectedly".
         * It usually should find the same req again, or some READ preceding it. */
        list_for_each_entry(req, &connection->transfer_log, tl_requests)
-               if (req->epoch == expect_epoch)
+               if (req->epoch == expect_epoch) {
+                       tmp = req;
                        break;
+               }
+       req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
        list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
                if (req->epoch != expect_epoch)
                        break;
@@ -2716,6 +2719,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
        sprintf(disk->disk_name, "drbd%d", minor);
        disk->private_data = device;
 
+       blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
        blk_queue_write_cache(disk->queue, true, true);
        /* Setting the max_hw_sectors to an odd value of 8kibyte here
           This triggers a max_bio_size message upon first attach or connect */
@@ -2770,12 +2774,12 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 
        if (init_submitter(device)) {
                err = ERR_NOMEM;
-               goto out_idr_remove_vol;
+               goto out_idr_remove_from_resource;
        }
 
        err = add_disk(disk);
        if (err)
-               goto out_idr_remove_vol;
+               goto out_idr_remove_from_resource;
 
        /* inherit the connection state */
        device->state.conn = first_connection(resource)->cstate;
@@ -2789,8 +2793,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
        drbd_debugfs_device_add(device);
        return NO_ERROR;
 
-out_idr_remove_vol:
-       idr_remove(&connection->peer_devices, vnr);
 out_idr_remove_from_resource:
        for_each_connection(connection, resource) {
                peer_device = idr_remove(&connection->peer_devices, vnr);
index 02030c9c4d3b16d55e0fc857dfe80b53472ac021..b7216c186ba4d731c6212f30751b435805a9fcb5 100644 (file)
@@ -4549,7 +4549,7 @@ static int nla_put_notification_header(struct sk_buff *msg,
        return drbd_notification_header_to_skb(msg, &nh, true);
 }
 
-void notify_resource_state(struct sk_buff *skb,
+int notify_resource_state(struct sk_buff *skb,
                           unsigned int seq,
                           struct drbd_resource *resource,
                           struct resource_info *resource_info,
@@ -4591,16 +4591,17 @@ void notify_resource_state(struct sk_buff *skb,
                if (err && err != -ESRCH)
                        goto failed;
        }
-       return;
+       return 0;
 
 nla_put_failure:
        nlmsg_free(skb);
 failed:
        drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
                        err, seq);
+       return err;
 }
 
-void notify_device_state(struct sk_buff *skb,
+int notify_device_state(struct sk_buff *skb,
                         unsigned int seq,
                         struct drbd_device *device,
                         struct device_info *device_info,
@@ -4640,16 +4641,17 @@ void notify_device_state(struct sk_buff *skb,
                if (err && err != -ESRCH)
                        goto failed;
        }
-       return;
+       return 0;
 
 nla_put_failure:
        nlmsg_free(skb);
 failed:
        drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
                 err, seq);
+       return err;
 }
 
-void notify_connection_state(struct sk_buff *skb,
+int notify_connection_state(struct sk_buff *skb,
                             unsigned int seq,
                             struct drbd_connection *connection,
                             struct connection_info *connection_info,
@@ -4689,16 +4691,17 @@ void notify_connection_state(struct sk_buff *skb,
                if (err && err != -ESRCH)
                        goto failed;
        }
-       return;
+       return 0;
 
 nla_put_failure:
        nlmsg_free(skb);
 failed:
        drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
                 err, seq);
+       return err;
 }
 
-void notify_peer_device_state(struct sk_buff *skb,
+int notify_peer_device_state(struct sk_buff *skb,
                              unsigned int seq,
                              struct drbd_peer_device *peer_device,
                              struct peer_device_info *peer_device_info,
@@ -4739,13 +4742,14 @@ void notify_peer_device_state(struct sk_buff *skb,
                if (err && err != -ESRCH)
                        goto failed;
        }
-       return;
+       return 0;
 
 nla_put_failure:
        nlmsg_free(skb);
 failed:
        drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
                 err, seq);
+       return err;
 }
 
 void notify_helper(enum drbd_notification_type type,
@@ -4796,7 +4800,7 @@ fail:
                 err, seq);
 }
 
-static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
+static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
 {
        struct drbd_genlmsghdr *dh;
        int err;
@@ -4810,11 +4814,12 @@ static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
        if (nla_put_notification_header(skb, NOTIFY_EXISTS))
                goto nla_put_failure;
        genlmsg_end(skb, dh);
-       return;
+       return 0;
 
 nla_put_failure:
        nlmsg_free(skb);
        pr_err("Error %d sending event. Event seq:%u\n", err, seq);
+       return err;
 }
 
 static void free_state_changes(struct list_head *list)
@@ -4841,6 +4846,7 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
        unsigned int seq = cb->args[2];
        unsigned int n;
        enum drbd_notification_type flags = 0;
+       int err = 0;
 
        /* There is no need to take notification_mutex here: it doesn't
           matter if the initial state events mix with later state change
@@ -4849,32 +4855,32 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
 
        cb->args[5]--;
        if (cb->args[5] == 1) {
-               notify_initial_state_done(skb, seq);
+               err = notify_initial_state_done(skb, seq);
                goto out;
        }
        n = cb->args[4]++;
        if (cb->args[4] < cb->args[3])
                flags |= NOTIFY_CONTINUES;
        if (n < 1) {
-               notify_resource_state_change(skb, seq, state_change->resource,
+               err = notify_resource_state_change(skb, seq, state_change->resource,
                                             NOTIFY_EXISTS | flags);
                goto next;
        }
        n--;
        if (n < state_change->n_connections) {
-               notify_connection_state_change(skb, seq, &state_change->connections[n],
+               err = notify_connection_state_change(skb, seq, &state_change->connections[n],
                                               NOTIFY_EXISTS | flags);
                goto next;
        }
        n -= state_change->n_connections;
        if (n < state_change->n_devices) {
-               notify_device_state_change(skb, seq, &state_change->devices[n],
+               err = notify_device_state_change(skb, seq, &state_change->devices[n],
                                           NOTIFY_EXISTS | flags);
                goto next;
        }
        n -= state_change->n_devices;
        if (n < state_change->n_devices * state_change->n_connections) {
-               notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
+               err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
                                                NOTIFY_EXISTS | flags);
                goto next;
        }
@@ -4889,7 +4895,10 @@ next:
                cb->args[4] = 0;
        }
 out:
-       return skb->len;
+       if (err)
+               return err;
+       else
+               return skb->len;
 }
 
 int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
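
Returning err (or skb->len) from get_initial_state() follows the standard netlink dump contract: a positive return tells the core the buffer was used and the .dumpit callback should be called again, while zero or a negative errno finishes the dump, with the errno delivered to userspace in the NLMSG_DONE message. A hedged sketch of the convention, with a hypothetical fill_one() helper:

    #include <linux/netlink.h>
    #include <linux/skbuff.h>

    static int fill_one(struct sk_buff *skb, struct netlink_callback *cb); /* hypothetical */

    static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
            int err = fill_one(skb, cb);

            if (err)
                    return err;      /* finishes the dump; errno reaches userspace */
            return skb->len;         /* > 0: come back for the next batch */
    }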
index c04394518b07f53b98722e32ede77aeb6e965f6d..75be0e16770a090f99e1573140520a30651b54ff 100644 (file)
@@ -180,7 +180,8 @@ void start_new_tl_epoch(struct drbd_connection *connection)
 void complete_master_bio(struct drbd_device *device,
                struct bio_and_error *m)
 {
-       m->bio->bi_status = errno_to_blk_status(m->error);
+       if (unlikely(m->error))
+               m->bio->bi_status = errno_to_blk_status(m->error);
        bio_endio(m->bio);
        dec_ap_bio(device);
 }
@@ -332,17 +333,21 @@ static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct dr
 static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
 {
        struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
+       struct drbd_request *iter = req;
        if (!connection)
                return;
        if (connection->req_next != req)
                return;
-       list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
-               const unsigned s = req->rq_state;
-               if (s & RQ_NET_QUEUED)
+
+       req = NULL;
+       list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
+               const unsigned int s = iter->rq_state;
+
+               if (s & RQ_NET_QUEUED) {
+                       req = iter;
                        break;
+               }
        }
-       if (&req->tl_requests == &connection->transfer_log)
-               req = NULL;
        connection->req_next = req;
 }
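
The same rewrite is applied to all three advance_conn_req_* helpers in this file: the loop runs on a dedicated iter cursor and only assigns req on a genuine match, which removes the old post-loop check comparing &req->tl_requests against the list head to detect that the cursor had run off the end (at which point req was the head masquerading as an entry). A condensed sketch of the hardened pattern, with a hypothetical item type:

    #include <linux/list.h>

    #define ITEM_QUEUED 0x1

    struct item {
            unsigned int flags;
            struct list_head node;
    };

    /* Returns the next queued entry after start, or NULL; never the head. */
    static struct item *next_queued(struct list_head *head, struct item *start)
    {
            struct item *iter = start, *found = NULL;

            list_for_each_entry_continue(iter, head, node) {
                    if (iter->flags & ITEM_QUEUED) {
                            found = iter;
                            break;
                    }
            }
            return found;
    }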
 
@@ -358,17 +363,21 @@ static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, st
 static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
 {
        struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
+       struct drbd_request *iter = req;
        if (!connection)
                return;
        if (connection->req_ack_pending != req)
                return;
-       list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
-               const unsigned s = req->rq_state;
-               if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
+
+       req = NULL;
+       list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
+               const unsigned int s = iter->rq_state;
+
+               if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING)) {
+                       req = iter;
                        break;
+               }
        }
-       if (&req->tl_requests == &connection->transfer_log)
-               req = NULL;
        connection->req_ack_pending = req;
 }
 
@@ -384,17 +393,21 @@ static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, s
 static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
 {
        struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
+       struct drbd_request *iter = req;
        if (!connection)
                return;
        if (connection->req_not_net_done != req)
                return;
-       list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
-               const unsigned s = req->rq_state;
-               if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
+
+       req = NULL;
+       list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
+               const unsigned int s = iter->rq_state;
+
+               if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE)) {
+                       req = iter;
                        break;
+               }
        }
-       if (&req->tl_requests == &connection->transfer_log)
-               req = NULL;
        connection->req_not_net_done = req;
 }
 
index b8a27818ab3f83ae1139a30009c0b5e299a3f0b0..4ee11aef6672b8992a19f262284043a912cf5157 100644 (file)
@@ -1537,7 +1537,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device,
        return rv;
 }
 
-void notify_resource_state_change(struct sk_buff *skb,
+int notify_resource_state_change(struct sk_buff *skb,
                                  unsigned int seq,
                                  struct drbd_resource_state_change *resource_state_change,
                                  enum drbd_notification_type type)
@@ -1550,10 +1550,10 @@ void notify_resource_state_change(struct sk_buff *skb,
                .res_susp_fen = resource_state_change->susp_fen[NEW],
        };
 
-       notify_resource_state(skb, seq, resource, &resource_info, type);
+       return notify_resource_state(skb, seq, resource, &resource_info, type);
 }
 
-void notify_connection_state_change(struct sk_buff *skb,
+int notify_connection_state_change(struct sk_buff *skb,
                                    unsigned int seq,
                                    struct drbd_connection_state_change *connection_state_change,
                                    enum drbd_notification_type type)
@@ -1564,10 +1564,10 @@ void notify_connection_state_change(struct sk_buff *skb,
                .conn_role = connection_state_change->peer_role[NEW],
        };
 
-       notify_connection_state(skb, seq, connection, &connection_info, type);
+       return notify_connection_state(skb, seq, connection, &connection_info, type);
 }
 
-void notify_device_state_change(struct sk_buff *skb,
+int notify_device_state_change(struct sk_buff *skb,
                                unsigned int seq,
                                struct drbd_device_state_change *device_state_change,
                                enum drbd_notification_type type)
@@ -1577,10 +1577,10 @@ void notify_device_state_change(struct sk_buff *skb,
                .dev_disk_state = device_state_change->disk_state[NEW],
        };
 
-       notify_device_state(skb, seq, device, &device_info, type);
+       return notify_device_state(skb, seq, device, &device_info, type);
 }
 
-void notify_peer_device_state_change(struct sk_buff *skb,
+int notify_peer_device_state_change(struct sk_buff *skb,
                                     unsigned int seq,
                                     struct drbd_peer_device_state_change *p,
                                     enum drbd_notification_type type)
@@ -1594,7 +1594,7 @@ void notify_peer_device_state_change(struct sk_buff *skb,
                .peer_resync_susp_dependency = p->resync_susp_dependency[NEW],
        };
 
-       notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
+       return notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
 }
 
 static void broadcast_state_change(struct drbd_state_change *state_change)
@@ -1602,7 +1602,7 @@ static void broadcast_state_change(struct drbd_state_change *state_change)
        struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
        bool resource_state_has_changed;
        unsigned int n_device, n_connection, n_peer_device, n_peer_devices;
-       void (*last_func)(struct sk_buff *, unsigned int, void *,
+       int (*last_func)(struct sk_buff *, unsigned int, void *,
                          enum drbd_notification_type) = NULL;
        void *last_arg = NULL;
 
index ba80f612d6abbc185dc9a73ee5e72dbb86932b8f..d5b0479bc9a6649e6bd423d5793861e5fa0022f6 100644 (file)
@@ -44,19 +44,19 @@ extern struct drbd_state_change *remember_old_state(struct drbd_resource *, gfp_
 extern void copy_old_to_new_state_change(struct drbd_state_change *);
 extern void forget_state_change(struct drbd_state_change *);
 
-extern void notify_resource_state_change(struct sk_buff *,
+extern int notify_resource_state_change(struct sk_buff *,
                                         unsigned int,
                                         struct drbd_resource_state_change *,
                                         enum drbd_notification_type type);
-extern void notify_connection_state_change(struct sk_buff *,
+extern int notify_connection_state_change(struct sk_buff *,
                                           unsigned int,
                                           struct drbd_connection_state_change *,
                                           enum drbd_notification_type type);
-extern void notify_device_state_change(struct sk_buff *,
+extern int notify_device_state_change(struct sk_buff *,
                                       unsigned int,
                                       struct drbd_device_state_change *,
                                       enum drbd_notification_type type);
-extern void notify_peer_device_state_change(struct sk_buff *,
+extern int notify_peer_device_state_change(struct sk_buff *,
                                            unsigned int,
                                            struct drbd_peer_device_state_change *,
                                            enum drbd_notification_type type);
index 3e636a75c83a8cd227db5c8c92f93aa102be138f..a58595f5ee2c8f450a03b651fd6650ddc79f5e0a 100644 (file)
@@ -1591,6 +1591,7 @@ struct compat_loop_info {
        compat_ulong_t  lo_inode;       /* ioctl r/o */
        compat_dev_t    lo_rdevice;     /* ioctl r/o */
        compat_int_t    lo_offset;
+       compat_int_t    lo_encrypt_type;        /* obsolete, ignored */
        compat_int_t    lo_encrypt_key_size;    /* ioctl w/o */
        compat_int_t    lo_flags;       /* ioctl r/o */
        char            lo_name[LO_NAME_SIZE];
index 4db9a8c244af5cd5a1bf1827fc806bc9a3696555..e094d2b8b5a929804d12a3f7be58b97ca5ca13a2 100644 (file)
@@ -88,7 +88,7 @@ static void n64cart_submit_bio(struct bio *bio)
 {
        struct bio_vec bvec;
        struct bvec_iter iter;
-       struct device *dev = bio->bi_disk->private_data;
+       struct device *dev = bio->bi_bdev->bd_disk->private_data;
        u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT;
 
        bio_for_each_segment(bvec, bio, iter) {
index 05b1120e66234d71ba18ba06428cd594c01ffe81..c441a4972064e8b5995cc9d35f79e1d8c377d0e2 100644 (file)
@@ -1600,7 +1600,7 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
         * Only fake timeouts need to execute blk_mq_complete_request() here.
         */
        cmd->error = BLK_STS_TIMEOUT;
-       if (cmd->fake_timeout)
+       if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL)
                blk_mq_complete_request(rq);
        return BLK_EH_DONE;
 }
index d1e26461a64ed98a400be79e0345c3240fcf6566..de42458195bc1c8990faf8477168ea3b0d604e20 100644 (file)
@@ -931,7 +931,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
        if (rc)
                goto unmap;
 
-       for (n = 0, i = 0; n < nseg; n++) {
+       for (n = 0; n < nseg; n++) {
                uint8_t first_sect, last_sect;
 
                if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
index 378262ec47aeb590f745d6ada698d28eb285e4d2..003056d4f7f5f078b9dfda5c48833cc2a125a9a8 100644 (file)
@@ -576,7 +576,7 @@ struct setup_rw_req {
        struct blkif_request *ring_req;
        grant_ref_t gref_head;
        unsigned int id;
-       /* Only used when persistent grant is used and it's a read request */
+       /* Only used when persistent grant is used and it's a write request */
        bool need_copy;
        unsigned int bvec_off;
        char *bvec_data;
index 7bd10d63ddbe5904c1e32c4cb8ec6d1647b2eb14..2dc9da683a13e30fe830aa3965b8290eb7129a53 100644 (file)
@@ -1365,7 +1365,6 @@ out_free:
  */
 int cdrom_number_of_slots(struct cdrom_device_info *cdi) 
 {
-       int status;
        int nslots = 1;
        struct cdrom_changer_info *info;
 
@@ -1377,7 +1376,7 @@ int cdrom_number_of_slots(struct cdrom_device_info *cdi)
        if (!info)
                return -ENOMEM;
 
-       if ((status = cdrom_read_mech_status(cdi, info)) == 0)
+       if (cdrom_read_mech_status(cdi, info) == 0)
                nslots = info->hdr.nslots;
 
        kfree(info);
index 740811893c57059cc18d365490840fec3a6094b2..55f48375e3fe5487d8ebb47a250b75dafa9ec61c 100644 (file)
@@ -449,6 +449,7 @@ config RANDOM_TRUST_BOOTLOADER
        device randomness. Say Y here to assume the entropy provided by the
        bootloader is trustworthy so it will be added to the kernel's entropy
        pool. Otherwise, say N here so it will be regarded as device input that
-       only mixes the entropy pool.
+       only mixes the entropy pool. This can also be configured at boot with
+       "random.trust_bootloader=on/off".
 
 endmenu
index 66ce7c03a1424b43984d674c2dc4cfce4912bb2a..3a293f919af97c5ed4e92b62c658727b5ee1e12b 100644 (file)
@@ -224,9 +224,10 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, void
  *
  * These interfaces will return the requested number of random bytes
  * into the given buffer or as a return value. This is equivalent to
- * a read from /dev/urandom. The integer family of functions may be
- * higher performance for one-off random integers, because they do a
- * bit of buffering.
+ * a read from /dev/urandom. The u32, u64, int, and long family of
+ * functions may be higher performance for one-off random integers,
+ * because they do a bit of buffering and do not invoke reseeding
+ * until the buffer is emptied.
  *
  *********************************************************************/
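
For illustration, the buffered integer helpers the comment refers to are used like this (existing kernel API, shown only as a usage sketch):

    #include <linux/printk.h>
    #include <linux/random.h>

    static void example(void)
    {
            u32 token = get_random_u32();        /* buffered; cheap for one-offs */
            u64 cookie = get_random_u64();
            u8 key[32];

            get_random_bytes(key, sizeof(key));  /* bulk bytes, urandom-equivalent */
            pr_debug("token=%u cookie=%llu\n", token, cookie);
    }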
 
@@ -332,7 +333,7 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
        chacha20_block(chacha_state, first_block);
 
        memcpy(key, first_block, CHACHA_KEY_SIZE);
-       memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
+       memmove(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
        memzero_explicit(first_block, sizeof(first_block));
 }
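
The memcpy() to memmove() switch is defensive: memmove() is defined for overlapping source and destination regions while memcpy() is not, so the copy stays correct even if random_data is ever made to alias the block it is copied from (the overlap itself is not visible in this hunk; this is the general C rule). A generic illustration:

    #include <string.h>

    /* Compacting a buffer in place: src and dst overlap, so memmove() is
     * required; memcpy() over overlapping regions is undefined behaviour. */
    static void compact(unsigned char *buf, size_t off, size_t len)
    {
            memmove(buf, buf + off, len);
    }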
 
@@ -436,11 +437,8 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
  * This shouldn't be set by functions like add_device_randomness(),
  * where we can't trust the buffer passed to it is guaranteed to be
  * unpredictable (so it might not have any entropy at all).
- *
- * Returns the number of bytes processed from input, which is bounded
- * by CRNG_INIT_CNT_THRESH if account is true.
  */
-static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
+static void crng_pre_init_inject(const void *input, size_t len, bool account)
 {
        static int crng_init_cnt = 0;
        struct blake2s_state hash;
@@ -451,18 +449,15 @@ static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
        spin_lock_irqsave(&base_crng.lock, flags);
        if (crng_init != 0) {
                spin_unlock_irqrestore(&base_crng.lock, flags);
-               return 0;
+               return;
        }
 
-       if (account)
-               len = min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
-
        blake2s_update(&hash, base_crng.key, sizeof(base_crng.key));
        blake2s_update(&hash, input, len);
        blake2s_final(&hash, base_crng.key);
 
        if (account) {
-               crng_init_cnt += len;
+               crng_init_cnt += min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
                if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
                        ++base_crng.generation;
                        crng_init = 1;
@@ -473,8 +468,6 @@ static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
 
        if (crng_init == 1)
                pr_notice("fast init done\n");
-
-       return len;
 }
 
 static void _get_random_bytes(void *buf, size_t nbytes)
@@ -530,49 +523,59 @@ EXPORT_SYMBOL(get_random_bytes);
 
 static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
 {
-       bool large_request = nbytes > 256;
-       ssize_t ret = 0;
-       size_t len;
+       size_t len, left, ret = 0;
        u32 chacha_state[CHACHA_STATE_WORDS];
        u8 output[CHACHA_BLOCK_SIZE];
 
        if (!nbytes)
                return 0;
 
-       len = min_t(size_t, 32, nbytes);
-       crng_make_state(chacha_state, output, len);
-
-       if (copy_to_user(buf, output, len))
-               return -EFAULT;
-       nbytes -= len;
-       buf += len;
-       ret += len;
-
-       while (nbytes) {
-               if (large_request && need_resched()) {
-                       if (signal_pending(current))
-                               break;
-                       schedule();
-               }
+       /*
+        * Immediately overwrite the ChaCha key at index 4 with random
+        * bytes, in case userspace causes copy_to_user() below to sleep
+        * forever, so that we still retain forward secrecy in that case.
+        */
+       crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
+       /*
+        * However, if we're doing a read of len <= 32, we don't need to
+        * use chacha_state after, so we can simply return those bytes to
+        * the user directly.
+        */
+       if (nbytes <= CHACHA_KEY_SIZE) {
+               ret = nbytes - copy_to_user(buf, &chacha_state[4], nbytes);
+               goto out_zero_chacha;
+       }
 
+       for (;;) {
                chacha20_block(chacha_state, output);
                if (unlikely(chacha_state[12] == 0))
                        ++chacha_state[13];
 
                len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
-               if (copy_to_user(buf, output, len)) {
-                       ret = -EFAULT;
+               left = copy_to_user(buf, output, len);
+               if (left) {
+                       ret += len - left;
                        break;
                }
 
-               nbytes -= len;
                buf += len;
                ret += len;
+               nbytes -= len;
+               if (!nbytes)
+                       break;
+
+               BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
+               if (ret % PAGE_SIZE == 0) {
+                       if (signal_pending(current))
+                               break;
+                       cond_resched();
+               }
        }
 
-       memzero_explicit(chacha_state, sizeof(chacha_state));
        memzero_explicit(output, sizeof(output));
-       return ret;
+out_zero_chacha:
+       memzero_explicit(chacha_state, sizeof(chacha_state));
+       return ret ? ret : -EFAULT;
 }
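
The new tail implements the usual read(2) contract: if any bytes reached userspace before a fault or signal, report the short count, and only return -EFAULT when nothing was copied at all. A condensed sketch of that accounting, assuming a hypothetical fill_block() generator:

    #include <linux/minmax.h>
    #include <linux/uaccess.h>

    static void fill_block(u8 block[64]); /* hypothetical generator */

    static ssize_t read_random(void __user *ubuf, size_t nbytes)
    {
            u8 block[64];
            size_t len, left, ret = 0;

            if (!nbytes)
                    return 0;

            while (nbytes) {
                    fill_block(block);
                    len = min_t(size_t, nbytes, sizeof(block));
                    left = copy_to_user(ubuf, block, len);
                    if (left) {
                            ret += len - left;   /* credit the partial copy */
                            break;
                    }
                    ubuf += len;
                    ret += len;
                    nbytes -= len;
            }

            /* A short read wins over -EFAULT; fail only if nothing was copied. */
            return ret ? ret : -EFAULT;
    }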
 
 /*
@@ -948,11 +951,17 @@ static bool drain_entropy(void *buf, size_t nbytes, bool force)
  **********************************************************************/
 
 static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
 static int __init parse_trust_cpu(char *arg)
 {
        return kstrtobool(arg, &trust_cpu);
 }
+static int __init parse_trust_bootloader(char *arg)
+{
+       return kstrtobool(arg, &trust_bootloader);
+}
 early_param("random.trust_cpu", parse_trust_cpu);
+early_param("random.trust_bootloader", parse_trust_bootloader);
 
 /*
  * The first collection of entropy occurs at system boot while interrupts
@@ -968,6 +977,11 @@ int __init rand_initialize(void)
        bool arch_init = true;
        unsigned long rv;
 
+#if defined(LATENT_ENTROPY_PLUGIN)
+       static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
+       _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
+#endif
+
        for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) {
                if (!arch_get_random_seed_long_early(&rv) &&
                    !arch_get_random_long_early(&rv)) {
@@ -1004,7 +1018,7 @@ int __init rand_initialize(void)
  */
 void add_device_randomness(const void *buf, size_t size)
 {
-       cycles_t cycles = random_get_entropy();
+       unsigned long cycles = random_get_entropy();
        unsigned long flags, now = jiffies;
 
        if (crng_init == 0 && size)
@@ -1035,8 +1049,7 @@ struct timer_rand_state {
  */
 static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
 {
-       cycles_t cycles = random_get_entropy();
-       unsigned long flags, now = jiffies;
+       unsigned long cycles = random_get_entropy(), now = jiffies, flags;
        long delta, delta2, delta3;
 
        spin_lock_irqsave(&input_pool.lock, flags);
@@ -1128,13 +1141,10 @@ void rand_initialize_disk(struct gendisk *disk)
 void add_hwgenerator_randomness(const void *buffer, size_t count,
                                size_t entropy)
 {
-       if (unlikely(crng_init == 0)) {
-               size_t ret = crng_pre_init_inject(buffer, count, true);
-               mix_pool_bytes(buffer, ret);
-               count -= ret;
-               buffer += ret;
-               if (!count || crng_init == 0)
-                       return;
+       if (unlikely(crng_init == 0 && entropy < POOL_MIN_BITS)) {
+               crng_pre_init_inject(buffer, count, true);
+               mix_pool_bytes(buffer, count);
+               return;
        }
 
        /*
@@ -1160,7 +1170,7 @@ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
  */
 void add_bootloader_randomness(const void *buf, size_t size)
 {
-       if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
+       if (trust_bootloader)
                add_hwgenerator_randomness(buf, size, size * 8);
        else
                add_device_randomness(buf, size);
@@ -1328,8 +1338,7 @@ static void mix_interrupt_randomness(struct work_struct *work)
 void add_interrupt_randomness(int irq)
 {
        enum { MIX_INFLIGHT = 1U << 31 };
-       cycles_t cycles = random_get_entropy();
-       unsigned long now = jiffies;
+       unsigned long cycles = random_get_entropy(), now = jiffies;
        struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
        struct pt_regs *regs = get_irq_regs();
        unsigned int new_count;
@@ -1342,16 +1351,12 @@ void add_interrupt_randomness(int irq)
        if (cycles == 0)
                cycles = get_reg(fast_pool, regs);
 
-       if (sizeof(cycles) == 8)
+       if (sizeof(unsigned long) == 8) {
                irq_data.u64[0] = cycles ^ rol64(now, 32) ^ irq;
-       else {
+               irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
+       } else {
                irq_data.u32[0] = cycles ^ irq;
                irq_data.u32[1] = now;
-       }
-
-       if (sizeof(unsigned long) == 8)
-               irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
-       else {
                irq_data.u32[2] = regs ? instruction_pointer(regs) : _RET_IP_;
                irq_data.u32[3] = get_reg(fast_pool, regs);
        }
@@ -1398,7 +1403,7 @@ static void entropy_timer(struct timer_list *t)
 static void try_to_generate_entropy(void)
 {
        struct {
-               cycles_t cycles;
+               unsigned long cycles;
                struct timer_list timer;
        } stack;
 
@@ -1533,6 +1538,13 @@ static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
 {
        static int maxwarn = 10;
 
+       /*
+        * Opportunistically attempt to initialize the RNG on platforms that
+        * have fast cycle counters, but don't (for now) require it to succeed.
+        */
+       if (!crng_ready())
+               try_to_generate_entropy();
+
        if (!crng_ready() && maxwarn > 0) {
                maxwarn--;
                if (__ratelimit(&urandom_warning))
index 07a27b65b773cd22a77d29f2c2faf7be118e346b..ed119182aa1bebb509c28b9b6c7abb25045d7909 100644 (file)
@@ -2332,15 +2332,19 @@ int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
 }
 EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
 
-static int clk_set_rate_range_nolock(struct clk *clk,
-                                    unsigned long min,
-                                    unsigned long max)
+/**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
 {
        int ret = 0;
        unsigned long old_min, old_max, rate;
 
-       lockdep_assert_held(&prepare_lock);
-
        if (!clk)
                return 0;
 
@@ -2353,6 +2357,8 @@ static int clk_set_rate_range_nolock(struct clk *clk,
                return -EINVAL;
        }
 
+       clk_prepare_lock();
+
        if (clk->exclusive_count)
                clk_core_rate_unprotect(clk->core);
 
@@ -2396,28 +2402,6 @@ out:
        if (clk->exclusive_count)
                clk_core_rate_protect(clk->core);
 
-       return ret;
-}
-
-/**
- * clk_set_rate_range - set a rate range for a clock source
- * @clk: clock source
- * @min: desired minimum clock rate in Hz, inclusive
- * @max: desired maximum clock rate in Hz, inclusive
- *
- * Return: 0 for success or negative errno on failure.
- */
-int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
-{
-       int ret;
-
-       if (!clk)
-               return 0;
-
-       clk_prepare_lock();
-
-       ret = clk_set_rate_range_nolock(clk, min, max);
-
        clk_prepare_unlock();
 
        return ret;
@@ -4419,7 +4403,9 @@ void __clk_put(struct clk *clk)
        }
 
        hlist_del(&clk->clks_node);
-       clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
+       if (clk->min_rate > clk->core->req_rate ||
+           clk->max_rate < clk->core->req_rate)
+               clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
 
        owner = clk->core->owner;
        kref_put(&clk->core->ref, __clk_release);
index fd2339cc5898a9b1155da583d55c6c9ddd0a532c..6731a822f4e38f93494e8e001c9078148c3cf942 100644 (file)
@@ -760,65 +760,9 @@ static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
        clk_put(user1);
 }
 
-/*
- * Test that if we have several subsequent calls to
- * clk_set_rate_range(), across multiple users, the core will reevaluate
- * whether a new rate is needed, including when a user drop its clock.
- *
- * With clk_dummy_maximize_rate_ops, this means that the rate will
- * trail along the maximum as it evolves.
- */
-static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
-{
-       struct clk_dummy_context *ctx = test->priv;
-       struct clk_hw *hw = &ctx->hw;
-       struct clk *clk = hw->clk;
-       struct clk *user1, *user2;
-       unsigned long rate;
-
-       user1 = clk_hw_get_clk(hw, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
-
-       user2 = clk_hw_get_clk(hw, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
-
-       KUNIT_ASSERT_EQ(test,
-                       clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
-                       0);
-
-       KUNIT_ASSERT_EQ(test,
-                       clk_set_rate_range(user1,
-                                          0,
-                                          DUMMY_CLOCK_RATE_2),
-                       0);
-
-       rate = clk_get_rate(clk);
-       KUNIT_ASSERT_GT(test, rate, 0);
-       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
-
-       KUNIT_ASSERT_EQ(test,
-                       clk_set_rate_range(user2,
-                                          0,
-                                          DUMMY_CLOCK_RATE_1),
-                       0);
-
-       rate = clk_get_rate(clk);
-       KUNIT_ASSERT_GT(test, rate, 0);
-       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
-
-       clk_put(user2);
-
-       rate = clk_get_rate(clk);
-       KUNIT_ASSERT_GT(test, rate, 0);
-       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
-
-       clk_put(user1);
-}
-
 static struct kunit_case clk_range_maximize_test_cases[] = {
        KUNIT_CASE(clk_range_test_set_range_rate_maximized),
        KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
-       KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
        {}
 };
 
@@ -933,61 +877,9 @@ static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
        clk_put(user1);
 }
 
-/*
- * Test that if we have several subsequent calls to
- * clk_set_rate_range(), across multiple users, the core will reevaluate
- * whether a new rate is needed, including when a user drop its clock.
- *
- * With clk_dummy_minimize_rate_ops, this means that the rate will
- * trail along the minimum as it evolves.
- */
-static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
-{
-       struct clk_dummy_context *ctx = test->priv;
-       struct clk_hw *hw = &ctx->hw;
-       struct clk *clk = hw->clk;
-       struct clk *user1, *user2;
-       unsigned long rate;
-
-       user1 = clk_hw_get_clk(hw, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
-
-       user2 = clk_hw_get_clk(hw, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
-
-       KUNIT_ASSERT_EQ(test,
-                       clk_set_rate_range(user1,
-                                          DUMMY_CLOCK_RATE_1,
-                                          ULONG_MAX),
-                       0);
-
-       rate = clk_get_rate(clk);
-       KUNIT_ASSERT_GT(test, rate, 0);
-       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
-
-       KUNIT_ASSERT_EQ(test,
-                       clk_set_rate_range(user2,
-                                          DUMMY_CLOCK_RATE_2,
-                                          ULONG_MAX),
-                       0);
-
-       rate = clk_get_rate(clk);
-       KUNIT_ASSERT_GT(test, rate, 0);
-       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
-
-       clk_put(user2);
-
-       rate = clk_get_rate(clk);
-       KUNIT_ASSERT_GT(test, rate, 0);
-       KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
-
-       clk_put(user1);
-}
-
 static struct kunit_case clk_range_minimize_test_cases[] = {
        KUNIT_CASE(clk_range_test_set_range_rate_minimized),
        KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
-       KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
        {}
 };
 
index 68a94e5af8eda1a8d5d4ce81dc8ebfc4339ee344..461537679c04593b0dd6f6d219cfce59ddba7985 100644 (file)
@@ -69,6 +69,11 @@ config SUN6I_A31_CCU
        default MACH_SUN6I
        depends on MACH_SUN6I || COMPILE_TEST
 
+config SUN6I_RTC_CCU
+       tristate "Support for the Allwinner H616/R329 RTC CCU"
+       default ARCH_SUNXI
+       depends on ARCH_SUNXI || COMPILE_TEST
+
 config SUN8I_A23_CCU
        tristate "Support for the Allwinner A23 CCU"
        default MACH_SUN8I
index ec931cb7aa143976b7339b1cd2f5042cdc7c91a8..6b3ae2b620db69534535872fa20da00ed1fee7b5 100644 (file)
@@ -36,6 +36,7 @@ obj-$(CONFIG_SUN50I_H616_CCU) += sun50i-h616-ccu.o
 obj-$(CONFIG_SUN4I_A10_CCU)    += sun4i-a10-ccu.o
 obj-$(CONFIG_SUN5I_CCU)                += sun5i-ccu.o
 obj-$(CONFIG_SUN6I_A31_CCU)    += sun6i-a31-ccu.o
+obj-$(CONFIG_SUN6I_RTC_CCU)    += sun6i-rtc-ccu.o
 obj-$(CONFIG_SUN8I_A23_CCU)    += sun8i-a23-ccu.o
 obj-$(CONFIG_SUN8I_A33_CCU)    += sun8i-a33-ccu.o
 obj-$(CONFIG_SUN8I_A83T_CCU)   += sun8i-a83t-ccu.o
@@ -60,6 +61,7 @@ sun50i-h616-ccu-y             += ccu-sun50i-h616.o
 sun4i-a10-ccu-y                        += ccu-sun4i-a10.o
 sun5i-ccu-y                    += ccu-sun5i.o
 sun6i-a31-ccu-y                        += ccu-sun6i-a31.o
+sun6i-rtc-ccu-y                        += ccu-sun6i-rtc.o
 sun8i-a23-ccu-y                        += ccu-sun8i-a23.o
 sun8i-a33-ccu-y                        += ccu-sun8i-a33.o
 sun8i-a83t-ccu-y               += ccu-sun8i-a83t.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
new file mode 100644 (file)
index 0000000..8a10bad
--- /dev/null
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 Samuel Holland <samuel@sholland.org>
+//
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#include <linux/clk/sunxi-ng.h>
+
+#include "ccu_common.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mux.h"
+
+#include "ccu-sun6i-rtc.h"
+
+#define IOSC_ACCURACY                  300000000 /* 30% */
+#define IOSC_RATE                      16000000
+
+#define LOSC_RATE                      32768
+#define LOSC_RATE_SHIFT                        15
+
+#define LOSC_CTRL_REG                  0x0
+#define LOSC_CTRL_KEY                  0x16aa0000
+
+#define IOSC_32K_CLK_DIV_REG           0x8
+#define IOSC_32K_CLK_DIV               GENMASK(4, 0)
+#define IOSC_32K_PRE_DIV               32
+
+#define IOSC_CLK_CALI_REG              0xc
+#define IOSC_CLK_CALI_DIV_ONES         22
+#define IOSC_CLK_CALI_EN               BIT(1)
+#define IOSC_CLK_CALI_SRC_SEL          BIT(0)
+
+#define LOSC_OUT_GATING_REG            0x60
+
+#define DCXO_CTRL_REG                  0x160
+#define DCXO_CTRL_CLK16M_RC_EN         BIT(0)
+
+struct sun6i_rtc_match_data {
+       bool                            have_ext_osc32k         : 1;
+       bool                            have_iosc_calibration   : 1;
+       bool                            rtc_32k_single_parent   : 1;
+       const struct clk_parent_data    *osc32k_fanout_parents;
+       u8                              osc32k_fanout_nparents;
+};
+
+static bool have_iosc_calibration;
+
+static int ccu_iosc_enable(struct clk_hw *hw)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+
+       return ccu_gate_helper_enable(cm, DCXO_CTRL_CLK16M_RC_EN);
+}
+
+static void ccu_iosc_disable(struct clk_hw *hw)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+
+       return ccu_gate_helper_disable(cm, DCXO_CTRL_CLK16M_RC_EN);
+}
+
+static int ccu_iosc_is_enabled(struct clk_hw *hw)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+
+       return ccu_gate_helper_is_enabled(cm, DCXO_CTRL_CLK16M_RC_EN);
+}
+
+static unsigned long ccu_iosc_recalc_rate(struct clk_hw *hw,
+                                         unsigned long parent_rate)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+
+       if (have_iosc_calibration) {
+               u32 reg = readl(cm->base + IOSC_CLK_CALI_REG);
+
+               /*
+                * Recover the IOSC frequency by shifting the ones place of
+                * (fixed-point divider * 32768) into bit zero.
+                */
+               if (reg & IOSC_CLK_CALI_EN)
+                       return reg >> (IOSC_CLK_CALI_DIV_ONES - LOSC_RATE_SHIFT);
+       }
+
+       return IOSC_RATE;
+}
+
+static unsigned long ccu_iosc_recalc_accuracy(struct clk_hw *hw,
+                                             unsigned long parent_accuracy)
+{
+       return IOSC_ACCURACY;
+}
+
+static const struct clk_ops ccu_iosc_ops = {
+       .enable                 = ccu_iosc_enable,
+       .disable                = ccu_iosc_disable,
+       .is_enabled             = ccu_iosc_is_enabled,
+       .recalc_rate            = ccu_iosc_recalc_rate,
+       .recalc_accuracy        = ccu_iosc_recalc_accuracy,
+};
+
+static struct ccu_common iosc_clk = {
+       .reg            = DCXO_CTRL_REG,
+       .hw.init        = CLK_HW_INIT_NO_PARENT("iosc", &ccu_iosc_ops,
+                                               CLK_GET_RATE_NOCACHE),
+};
+
+static int ccu_iosc_32k_prepare(struct clk_hw *hw)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+       u32 val;
+
+       if (!have_iosc_calibration)
+               return 0;
+
+       val = readl(cm->base + IOSC_CLK_CALI_REG);
+       writel(val | IOSC_CLK_CALI_EN | IOSC_CLK_CALI_SRC_SEL,
+              cm->base + IOSC_CLK_CALI_REG);
+
+       return 0;
+}
+
+static void ccu_iosc_32k_unprepare(struct clk_hw *hw)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+       u32 val;
+
+       if (!have_iosc_calibration)
+               return;
+
+       val = readl(cm->base + IOSC_CLK_CALI_REG);
+       writel(val & ~(IOSC_CLK_CALI_EN | IOSC_CLK_CALI_SRC_SEL),
+              cm->base + IOSC_CLK_CALI_REG);
+}
+
+static unsigned long ccu_iosc_32k_recalc_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+       u32 val;
+
+       if (have_iosc_calibration) {
+               val = readl(cm->base + IOSC_CLK_CALI_REG);
+
+               /* Assume the calibrated 32k clock is accurate. */
+               if (val & IOSC_CLK_CALI_SRC_SEL)
+                       return LOSC_RATE;
+       }
+
+       val = readl(cm->base + IOSC_32K_CLK_DIV_REG) & IOSC_32K_CLK_DIV;
+
+       return parent_rate / IOSC_32K_PRE_DIV / (val + 1);
+}
+
+static unsigned long ccu_iosc_32k_recalc_accuracy(struct clk_hw *hw,
+                                                 unsigned long parent_accuracy)
+{
+       struct ccu_common *cm = hw_to_ccu_common(hw);
+       u32 val;
+
+       if (have_iosc_calibration) {
+               val = readl(cm->base + IOSC_CLK_CALI_REG);
+
+               /* Assume the calibrated 32k clock is accurate. */
+               if (val & IOSC_CLK_CALI_SRC_SEL)
+                       return 0;
+       }
+
+       return parent_accuracy;
+}
+
+static const struct clk_ops ccu_iosc_32k_ops = {
+       .prepare                = ccu_iosc_32k_prepare,
+       .unprepare              = ccu_iosc_32k_unprepare,
+       .recalc_rate            = ccu_iosc_32k_recalc_rate,
+       .recalc_accuracy        = ccu_iosc_32k_recalc_accuracy,
+};
+
+static struct ccu_common iosc_32k_clk = {
+       .hw.init        = CLK_HW_INIT_HW("iosc-32k", &iosc_clk.hw,
+                                        &ccu_iosc_32k_ops,
+                                        CLK_GET_RATE_NOCACHE),
+};
+
+static const struct clk_hw *ext_osc32k[] = { NULL }; /* updated during probe */
+
+static SUNXI_CCU_GATE_HWS(ext_osc32k_gate_clk, "ext-osc32k-gate",
+                         ext_osc32k, 0x0, BIT(4), 0);
+
+static const struct clk_hw *osc32k_parents[] = {
+       &iosc_32k_clk.hw,
+       &ext_osc32k_gate_clk.common.hw
+};
+
+static struct clk_init_data osc32k_init_data = {
+       .name           = "osc32k",
+       .ops            = &ccu_mux_ops,
+       .parent_hws     = osc32k_parents,
+       .num_parents    = ARRAY_SIZE(osc32k_parents), /* updated during probe */
+};
+
+static struct ccu_mux osc32k_clk = {
+       .mux    = _SUNXI_CCU_MUX(0, 1),
+       .common = {
+               .reg            = LOSC_CTRL_REG,
+               .features       = CCU_FEATURE_KEY_FIELD,
+               .hw.init        = &osc32k_init_data,
+       },
+};
+
+/* This falls back to the global name for fwnodes without a named reference. */
+static const struct clk_parent_data osc24M[] = {
+       { .fw_name = "hosc", .name = "osc24M" }
+};
+
+static struct ccu_gate osc24M_32k_clk = {
+       .enable = BIT(16),
+       .common = {
+               .reg            = LOSC_OUT_GATING_REG,
+               .prediv         = 750,
+               .features       = CCU_FEATURE_ALL_PREDIV,
+               .hw.init        = CLK_HW_INIT_PARENTS_DATA("osc24M-32k", osc24M,
+                                                          &ccu_gate_ops, 0),
+       },
+};
+
+static const struct clk_hw *rtc_32k_parents[] = {
+       &osc32k_clk.common.hw,
+       &osc24M_32k_clk.common.hw
+};
+
+static struct clk_init_data rtc_32k_init_data = {
+       .name           = "rtc-32k",
+       .ops            = &ccu_mux_ops,
+       .parent_hws     = rtc_32k_parents,
+       .num_parents    = ARRAY_SIZE(rtc_32k_parents), /* updated during probe */
+};
+
+static struct ccu_mux rtc_32k_clk = {
+       .mux    = _SUNXI_CCU_MUX(1, 1),
+       .common = {
+               .reg            = LOSC_CTRL_REG,
+               .features       = CCU_FEATURE_KEY_FIELD,
+               .hw.init        = &rtc_32k_init_data,
+       },
+};
+
+static struct clk_init_data osc32k_fanout_init_data = {
+       .name           = "osc32k-fanout",
+       .ops            = &ccu_mux_ops,
+       /* parents are set during probe */
+};
+
+static struct ccu_mux osc32k_fanout_clk = {
+       .enable = BIT(0),
+       .mux    = _SUNXI_CCU_MUX(1, 2),
+       .common = {
+               .reg            = LOSC_OUT_GATING_REG,
+               .hw.init        = &osc32k_fanout_init_data,
+       },
+};
+
+static struct ccu_common *sun6i_rtc_ccu_clks[] = {
+       &iosc_clk,
+       &iosc_32k_clk,
+       &ext_osc32k_gate_clk.common,
+       &osc32k_clk.common,
+       &osc24M_32k_clk.common,
+       &rtc_32k_clk.common,
+       &osc32k_fanout_clk.common,
+};
+
+static struct clk_hw_onecell_data sun6i_rtc_ccu_hw_clks = {
+       .num = CLK_NUMBER,
+       .hws = {
+               [CLK_OSC32K]            = &osc32k_clk.common.hw,
+               [CLK_OSC32K_FANOUT]     = &osc32k_fanout_clk.common.hw,
+               [CLK_IOSC]              = &iosc_clk.hw,
+               [CLK_IOSC_32K]          = &iosc_32k_clk.hw,
+               [CLK_EXT_OSC32K_GATE]   = &ext_osc32k_gate_clk.common.hw,
+               [CLK_OSC24M_32K]        = &osc24M_32k_clk.common.hw,
+               [CLK_RTC_32K]           = &rtc_32k_clk.common.hw,
+       },
+};
+
+static const struct sunxi_ccu_desc sun6i_rtc_ccu_desc = {
+       .ccu_clks       = sun6i_rtc_ccu_clks,
+       .num_ccu_clks   = ARRAY_SIZE(sun6i_rtc_ccu_clks),
+
+       .hw_clks        = &sun6i_rtc_ccu_hw_clks,
+};
+
+static const struct clk_parent_data sun50i_h6_osc32k_fanout_parents[] = {
+       { .hw = &osc32k_clk.common.hw },
+};
+
+static const struct clk_parent_data sun50i_h616_osc32k_fanout_parents[] = {
+       { .hw = &osc32k_clk.common.hw },
+       { .fw_name = "pll-32k" },
+       { .hw = &osc24M_32k_clk.common.hw }
+};
+
+static const struct clk_parent_data sun50i_r329_osc32k_fanout_parents[] = {
+       { .hw = &osc32k_clk.common.hw },
+       { .hw = &ext_osc32k_gate_clk.common.hw },
+       { .hw = &osc24M_32k_clk.common.hw }
+};
+
+static const struct sun6i_rtc_match_data sun50i_h6_rtc_ccu_data = {
+       .have_ext_osc32k        = true,
+       .have_iosc_calibration  = true,
+       .osc32k_fanout_parents  = sun50i_h6_osc32k_fanout_parents,
+       .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_h6_osc32k_fanout_parents),
+};
+
+static const struct sun6i_rtc_match_data sun50i_h616_rtc_ccu_data = {
+       .have_iosc_calibration  = true,
+       .rtc_32k_single_parent  = true,
+       .osc32k_fanout_parents  = sun50i_h616_osc32k_fanout_parents,
+       .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_h616_osc32k_fanout_parents),
+};
+
+static const struct sun6i_rtc_match_data sun50i_r329_rtc_ccu_data = {
+       .have_ext_osc32k        = true,
+       .osc32k_fanout_parents  = sun50i_r329_osc32k_fanout_parents,
+       .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_r329_osc32k_fanout_parents),
+};
+
+static const struct of_device_id sun6i_rtc_ccu_match[] = {
+       {
+               .compatible     = "allwinner,sun50i-h6-rtc",
+               .data           = &sun50i_h6_rtc_ccu_data,
+       },
+       {
+               .compatible     = "allwinner,sun50i-h616-rtc",
+               .data           = &sun50i_h616_rtc_ccu_data,
+       },
+       {
+               .compatible     = "allwinner,sun50i-r329-rtc",
+               .data           = &sun50i_r329_rtc_ccu_data,
+       },
+       {},
+};
+
+int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg)
+{
+       const struct sun6i_rtc_match_data *data;
+       struct clk *ext_osc32k_clk = NULL;
+       const struct of_device_id *match;
+
+       /* This driver is only used for newer variants of the hardware. */
+       match = of_match_device(sun6i_rtc_ccu_match, dev);
+       if (!match)
+               return 0;
+
+       data = match->data;
+       have_iosc_calibration = data->have_iosc_calibration;
+
+       if (data->have_ext_osc32k) {
+               const char *fw_name;
+
+               /* ext-osc32k was the only input clock in the old binding. */
+               fw_name = of_property_read_bool(dev->of_node, "clock-names")
+                       ? "ext-osc32k" : NULL;
+               ext_osc32k_clk = devm_clk_get_optional(dev, fw_name);
+               if (IS_ERR(ext_osc32k_clk))
+                       return PTR_ERR(ext_osc32k_clk);
+       }
+
+       if (ext_osc32k_clk) {
+               /* Link ext-osc32k-gate to its parent. */
+               *ext_osc32k = __clk_get_hw(ext_osc32k_clk);
+       } else {
+               /* ext-osc32k-gate is an orphan, so do not register it. */
+               sun6i_rtc_ccu_hw_clks.hws[CLK_EXT_OSC32K_GATE] = NULL;
+               osc32k_init_data.num_parents = 1;
+       }
+
+       if (data->rtc_32k_single_parent)
+               rtc_32k_init_data.num_parents = 1;
+
+       osc32k_fanout_init_data.parent_data = data->osc32k_fanout_parents;
+       osc32k_fanout_init_data.num_parents = data->osc32k_fanout_nparents;
+
+       return devm_sunxi_ccu_probe(dev, reg, &sun6i_rtc_ccu_desc);
+}
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.h b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.h
new file mode 100644 (file)
index 0000000..9ae821f
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _CCU_SUN6I_RTC_H
+#define _CCU_SUN6I_RTC_H
+
+#include <dt-bindings/clock/sun6i-rtc.h>
+
+#define CLK_IOSC_32K           3
+#define CLK_EXT_OSC32K_GATE    4
+#define CLK_OSC24M_32K         5
+#define CLK_RTC_32K            6
+
+#define CLK_NUMBER             (CLK_RTC_32K + 1)
+
+#endif /* _CCU_SUN6I_RTC_H */
index 98a1834b58bb4f6b545a6a9877d2d98680cb8dcb..fbf16c6b896d4e3b06d0407e8636a55c79967aab 100644 (file)
@@ -17,6 +17,7 @@
 #define CCU_FEATURE_LOCK_REG           BIT(5)
 #define CCU_FEATURE_MMC_TIMING_SWITCH  BIT(6)
 #define CCU_FEATURE_SIGMA_DELTA_MOD    BIT(7)
+#define CCU_FEATURE_KEY_FIELD          BIT(8)
 
 /* MMC timing mode switch bit */
 #define CCU_MMC_NEW_TIMING_MODE                BIT(30)
index 2306a1cd83e4615ac038a78076dcbb304bcc1783..1d557e32316915787819b65959f1633e48e45d60 100644 (file)
@@ -12,6 +12,8 @@
 #include "ccu_gate.h"
 #include "ccu_mux.h"
 
+#define CCU_MUX_KEY_VALUE              0x16aa0000
+
 static u16 ccu_mux_get_prediv(struct ccu_common *common,
                              struct ccu_mux_internal *cm,
                              int parent_index)
@@ -191,6 +193,11 @@ int ccu_mux_helper_set_parent(struct ccu_common *common,
        spin_lock_irqsave(common->lock, flags);
 
        reg = readl(common->base + common->reg);
+
+       /* The key field always reads as zero. */
+       if (common->features & CCU_FEATURE_KEY_FIELD)
+               reg |= CCU_MUX_KEY_VALUE;
+
        reg &= ~GENMASK(cm->width + cm->shift - 1, cm->shift);
        writel(reg | (index << cm->shift), common->base + common->reg);
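
Because the key field reads back as zero, a plain read-modify-write would store zero into it and the hardware would silently discard the update; ORing the magic value back in on every write keeps the mux change effective. A generic sketch of the pattern, with a hypothetical register layout:

    #include <linux/bits.h>
    #include <linux/io.h>

    #define REG_KEY 0x16aa0000 /* write-enable key; reads back as 0 */

    static void keyed_update(void __iomem *reg, unsigned int shift,
                             unsigned int width, u32 val)
    {
            u32 v = readl(reg);

            v |= REG_KEY;                            /* must be rewritten each time */
            v &= ~GENMASK(shift + width - 1, shift);
            writel(v | (val << shift), reg);
    }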
 
index c0aeedd66f022ae7f92492d220a14685707338fc..ff71dd662880d9c6874962c3201dd38f9944665a 100644 (file)
@@ -47,6 +47,10 @@ config CPU_IDLE_GOV_HALTPOLL
 config DT_IDLE_STATES
        bool
 
+config DT_IDLE_GENPD
+       depends on PM_GENERIC_DOMAINS_OF
+       bool
+
 menu "ARM CPU Idle Drivers"
 depends on ARM || ARM64
 source "drivers/cpuidle/Kconfig.arm"
@@ -62,6 +66,11 @@ depends on PPC
 source "drivers/cpuidle/Kconfig.powerpc"
 endmenu
 
+menu "RISC-V CPU Idle Drivers"
+depends on RISCV
+source "drivers/cpuidle/Kconfig.riscv"
+endmenu
+
 config HALTPOLL_CPUIDLE
        tristate "Halt poll cpuidle driver"
        depends on X86 && KVM_GUEST
index 15d6c46c0a47771f7e4ee548dad11f628560ad17..be7f512109f793dd81b6c395aa1f257b5c2b241a 100644 (file)
@@ -27,6 +27,7 @@ config ARM_PSCI_CPUIDLE_DOMAIN
        bool "PSCI CPU idle Domain"
        depends on ARM_PSCI_CPUIDLE
        depends on PM_GENERIC_DOMAINS_OF
+       select DT_IDLE_GENPD
        default y
        help
          Select this to enable the PSCI based CPUidle driver to use PM domains,
diff --git a/drivers/cpuidle/Kconfig.riscv b/drivers/cpuidle/Kconfig.riscv
new file mode 100644 (file)
index 0000000..78518c2
--- /dev/null
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# RISC-V CPU Idle drivers
+#
+
+config RISCV_SBI_CPUIDLE
+       bool "RISC-V SBI CPU idle Driver"
+       depends on RISCV_SBI
+       select DT_IDLE_STATES
+       select CPU_IDLE_MULTIPLE_DRIVERS
+       select DT_IDLE_GENPD if PM_GENERIC_DOMAINS_OF
+       help
+         Select this option to enable RISC-V SBI firmware based CPU idle
+         driver for RISC-V systems. This driver also supports a hierarchical
+         DT-based layout of the idle states.
index 26bbc5e7412366aef699a453d90d5810f191b6b4..d103342b7cfc2120d2981dc1f61be9029aa69c54 100644 (file)
@@ -6,6 +6,7 @@
 obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
 obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
 obj-$(CONFIG_DT_IDLE_STATES)             += dt_idle_states.o
+obj-$(CONFIG_DT_IDLE_GENPD)              += dt_idle_genpd.o
 obj-$(CONFIG_ARCH_HAS_CPU_RELAX)         += poll_state.o
 obj-$(CONFIG_HALTPOLL_CPUIDLE)           += cpuidle-haltpoll.o
 
@@ -34,3 +35,7 @@ obj-$(CONFIG_MIPS_CPS_CPUIDLE)                += cpuidle-cps.o
 # POWERPC drivers
 obj-$(CONFIG_PSERIES_CPUIDLE)          += cpuidle-pseries.o
 obj-$(CONFIG_POWERNV_CPUIDLE)          += cpuidle-powernv.o
+
+###############################################################################
+# RISC-V drivers
+obj-$(CONFIG_RISCV_SBI_CPUIDLE)                += cpuidle-riscv-sbi.o
index ff2c3f8e4668ae07b674b6d4a1ce036c563d6762..755bbdfc5b82ff67cdd3878b6083113181758a9b 100644 (file)
@@ -47,73 +47,14 @@ static int psci_pd_power_off(struct generic_pm_domain *pd)
        return 0;
 }
 
-static int psci_pd_parse_state_nodes(struct genpd_power_state *states,
-                                    int state_count)
-{
-       int i, ret;
-       u32 psci_state, *psci_state_buf;
-
-       for (i = 0; i < state_count; i++) {
-               ret = psci_dt_parse_state_node(to_of_node(states[i].fwnode),
-                                       &psci_state);
-               if (ret)
-                       goto free_state;
-
-               psci_state_buf = kmalloc(sizeof(u32), GFP_KERNEL);
-               if (!psci_state_buf) {
-                       ret = -ENOMEM;
-                       goto free_state;
-               }
-               *psci_state_buf = psci_state;
-               states[i].data = psci_state_buf;
-       }
-
-       return 0;
-
-free_state:
-       i--;
-       for (; i >= 0; i--)
-               kfree(states[i].data);
-       return ret;
-}
-
-static int psci_pd_parse_states(struct device_node *np,
-                       struct genpd_power_state **states, int *state_count)
-{
-       int ret;
-
-       /* Parse the domain idle states. */
-       ret = of_genpd_parse_idle_states(np, states, state_count);
-       if (ret)
-               return ret;
-
-       /* Fill out the PSCI specifics for each found state. */
-       ret = psci_pd_parse_state_nodes(*states, *state_count);
-       if (ret)
-               kfree(*states);
-
-       return ret;
-}
-
-static void psci_pd_free_states(struct genpd_power_state *states,
-                               unsigned int state_count)
-{
-       int i;
-
-       for (i = 0; i < state_count; i++)
-               kfree(states[i].data);
-       kfree(states);
-}
-
 static int psci_pd_init(struct device_node *np, bool use_osi)
 {
        struct generic_pm_domain *pd;
        struct psci_pd_provider *pd_provider;
        struct dev_power_governor *pd_gov;
-       struct genpd_power_state *states = NULL;
        int ret = -ENOMEM, state_count = 0;
 
-       pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+       pd = dt_idle_pd_alloc(np, psci_dt_parse_state_node);
        if (!pd)
                goto out;
 
@@ -121,22 +62,6 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
        if (!pd_provider)
                goto free_pd;
 
-       pd->name = kasprintf(GFP_KERNEL, "%pOF", np);
-       if (!pd->name)
-               goto free_pd_prov;
-
-       /*
-        * Parse the domain idle states and let genpd manage the state selection
-        * for those being compatible with "domain-idle-state".
-        */
-       ret = psci_pd_parse_states(np, &states, &state_count);
-       if (ret)
-               goto free_name;
-
-       pd->free_states = psci_pd_free_states;
-       pd->name = kbasename(pd->name);
-       pd->states = states;
-       pd->state_count = state_count;
        pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
 
        /* Allow power off when OSI has been successfully enabled. */
@@ -149,10 +74,8 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
        pd_gov = state_count > 0 ? &pm_domain_cpu_gov : NULL;
 
        ret = pm_genpd_init(pd, pd_gov, false);
-       if (ret) {
-               psci_pd_free_states(states, state_count);
-               goto free_name;
-       }
+       if (ret)
+               goto free_pd_prov;
 
        ret = of_genpd_add_provider_simple(np, pd);
        if (ret)
@@ -166,12 +89,10 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
 
 remove_pd:
        pm_genpd_remove(pd);
-free_name:
-       kfree(pd->name);
 free_pd_prov:
        kfree(pd_provider);
 free_pd:
-       kfree(pd);
+       dt_idle_pd_free(pd);
 out:
        pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
        return ret;
@@ -195,30 +116,6 @@ static void psci_pd_remove(void)
        }
 }
 
-static int psci_pd_init_topology(struct device_node *np)
-{
-       struct device_node *node;
-       struct of_phandle_args child, parent;
-       int ret;
-
-       for_each_child_of_node(np, node) {
-               if (of_parse_phandle_with_args(node, "power-domains",
-                                       "#power-domain-cells", 0, &parent))
-                       continue;
-
-               child.np = node;
-               child.args_count = 0;
-               ret = of_genpd_add_subdomain(&parent, &child);
-               of_node_put(parent.np);
-               if (ret) {
-                       of_node_put(node);
-                       return ret;
-               }
-       }
-
-       return 0;
-}
-
 static bool psci_pd_try_set_osi_mode(void)
 {
        int ret;
@@ -282,7 +179,7 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
                goto no_pd;
 
        /* Link genpd masters/subdomains to model the CPU topology. */
-       ret = psci_pd_init_topology(np);
+       ret = dt_idle_pd_init_topology(np);
        if (ret)
                goto remove_pd;
 
@@ -314,28 +211,3 @@ static int __init psci_idle_init_domains(void)
        return platform_driver_register(&psci_cpuidle_domain_driver);
 }
 subsys_initcall(psci_idle_init_domains);
-
-struct device *psci_dt_attach_cpu(int cpu)
-{
-       struct device *dev;
-
-       dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), "psci");
-       if (IS_ERR_OR_NULL(dev))
-               return dev;
-
-       pm_runtime_irq_safe(dev);
-       if (cpu_online(cpu))
-               pm_runtime_get_sync(dev);
-
-       dev_pm_syscore_device(dev, true);
-
-       return dev;
-}
-
-void psci_dt_detach_cpu(struct device *dev)
-{
-       if (IS_ERR_OR_NULL(dev))
-               return;
-
-       dev_pm_domain_detach(dev, false);
-}
index d8e925e84c27af3d400f6f972d4ffe24dcf0c54b..4e132640ed64851a5990fca21eab08142639d095 100644 (file)
@@ -10,8 +10,19 @@ void psci_set_domain_state(u32 state);
 int psci_dt_parse_state_node(struct device_node *np, u32 *state);
 
 #ifdef CONFIG_ARM_PSCI_CPUIDLE_DOMAIN
-struct device *psci_dt_attach_cpu(int cpu);
-void psci_dt_detach_cpu(struct device *dev);
+
+#include "dt_idle_genpd.h"
+
+static inline struct device *psci_dt_attach_cpu(int cpu)
+{
+       return dt_idle_attach_cpu(cpu, "psci");
+}
+
+static inline void psci_dt_detach_cpu(struct device *dev)
+{
+       dt_idle_detach_cpu(dev);
+}
+
 #else
 static inline struct device *psci_dt_attach_cpu(int cpu) { return NULL; }
 static inline void psci_dt_detach_cpu(struct device *dev) { }
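
With the generic dt_idle_genpd helpers in place, the PSCI-specific attach/detach
entry points reduce to the thin static-inline wrappers above, and any other
firmware back-end can layer on the same helpers. A minimal sketch of that
pattern follows, assuming a hypothetical "acme" firmware interface; the "acme"
domain name, the "acme,suspend-param" property, and the parse callback are
illustrative assumptions, not part of this series:

	static int acme_parse_state_node(struct device_node *np, u32 *state)
	{
		/* Read the firmware-specific suspend parameter from the idle-state node. */
		return of_property_read_u32(np, "acme,suspend-param", state);
	}

	static inline struct device *acme_dt_attach_cpu(int cpu)
	{
		/* "acme" must match an entry in the CPU node's power-domain-names. */
		return dt_idle_attach_cpu(cpu, "acme");
	}

	static struct generic_pm_domain *acme_pd_alloc(struct device_node *np)
	{
		return dt_idle_pd_alloc(np, acme_parse_state_node);
	}
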
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
new file mode 100644 (file)
index 0000000..b459eda
--- /dev/null
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * RISC-V SBI CPU idle driver.
+ *
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt
+
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpu_cooling.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <asm/cpuidle.h>
+#include <asm/sbi.h>
+#include <asm/suspend.h>
+
+#include "dt_idle_states.h"
+#include "dt_idle_genpd.h"
+
+struct sbi_cpuidle_data {
+       u32 *states;
+       struct device *dev;
+};
+
+struct sbi_domain_state {
+       bool available;
+       u32 state;
+};
+
+static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
+static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);
+static bool sbi_cpuidle_use_osi;
+static bool sbi_cpuidle_use_cpuhp;
+static bool sbi_cpuidle_pd_allow_domain_state;
+
+static inline void sbi_set_domain_state(u32 state)
+{
+       struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+       data->available = true;
+       data->state = state;
+}
+
+static inline u32 sbi_get_domain_state(void)
+{
+       struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+       return data->state;
+}
+
+static inline void sbi_clear_domain_state(void)
+{
+       struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+       data->available = false;
+}
+
+static inline bool sbi_is_domain_state_available(void)
+{
+       struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
+
+       return data->available;
+}
+
+static int sbi_suspend_finisher(unsigned long suspend_type,
+                               unsigned long resume_addr,
+                               unsigned long opaque)
+{
+       struct sbiret ret;
+
+       ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
+                       suspend_type, resume_addr, opaque, 0, 0, 0);
+
+       return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
+}
+
+static int sbi_suspend(u32 state)
+{
+       if (state & SBI_HSM_SUSP_NON_RET_BIT)
+               return cpu_suspend(state, sbi_suspend_finisher);
+       else
+               return sbi_suspend_finisher(state, 0, 0);
+}
+
+static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
+                                  struct cpuidle_driver *drv, int idx)
+{
+       u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
+
+       return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, states[idx]);
+}
+
+static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
+                                         struct cpuidle_driver *drv, int idx,
+                                         bool s2idle)
+{
+       struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
+       u32 *states = data->states;
+       struct device *pd_dev = data->dev;
+       u32 state;
+       int ret;
+
+       ret = cpu_pm_enter();
+       if (ret)
+               return -1;
+
+       /* Do runtime PM to manage a hierarchical CPU topology. */
+       rcu_irq_enter_irqson();
+       if (s2idle)
+               dev_pm_genpd_suspend(pd_dev);
+       else
+               pm_runtime_put_sync_suspend(pd_dev);
+       rcu_irq_exit_irqson();
+
+       if (sbi_is_domain_state_available())
+               state = sbi_get_domain_state();
+       else
+               state = states[idx];
+
+       ret = sbi_suspend(state) ? -1 : idx;
+
+       rcu_irq_enter_irqson();
+       if (s2idle)
+               dev_pm_genpd_resume(pd_dev);
+       else
+               pm_runtime_get_sync(pd_dev);
+       rcu_irq_exit_irqson();
+
+       cpu_pm_exit();
+
+       /* Clear the domain state to start fresh when back from idle. */
+       sbi_clear_domain_state();
+       return ret;
+}
+
+static int sbi_enter_domain_idle_state(struct cpuidle_device *dev,
+                                      struct cpuidle_driver *drv, int idx)
+{
+       return __sbi_enter_domain_idle_state(dev, drv, idx, false);
+}
+
+static int sbi_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
+                                             struct cpuidle_driver *drv,
+                                             int idx)
+{
+       return __sbi_enter_domain_idle_state(dev, drv, idx, true);
+}
+
+static int sbi_cpuidle_cpuhp_up(unsigned int cpu)
+{
+       struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);
+
+       if (pd_dev)
+               pm_runtime_get_sync(pd_dev);
+
+       return 0;
+}
+
+static int sbi_cpuidle_cpuhp_down(unsigned int cpu)
+{
+       struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);
+
+       if (pd_dev) {
+               pm_runtime_put_sync(pd_dev);
+               /* Clear domain state to start fresh at next online. */
+               sbi_clear_domain_state();
+       }
+
+       return 0;
+}
+
+static void sbi_idle_init_cpuhp(void)
+{
+       int err;
+
+       if (!sbi_cpuidle_use_cpuhp)
+               return;
+
+       err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
+                                       "cpuidle/sbi:online",
+                                       sbi_cpuidle_cpuhp_up,
+                                       sbi_cpuidle_cpuhp_down);
+       if (err)
+               pr_warn("Failed %d while setting up cpuhp state\n", err);
+}
+
+static const struct of_device_id sbi_cpuidle_state_match[] = {
+       { .compatible = "riscv,idle-state",
+         .data = sbi_cpuidle_enter_state },
+       { },
+};
+
+static bool sbi_suspend_state_is_valid(u32 state)
+{
+       if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
+           state < SBI_HSM_SUSPEND_RET_PLATFORM)
+               return false;
+       if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
+           state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
+               return false;
+       return true;
+}
+
+static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
+{
+       int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);
+
+       if (err) {
+               pr_warn("%pOF missing riscv,sbi-suspend-param property\n", np);
+               return err;
+       }
+
+       if (!sbi_suspend_state_is_valid(*state)) {
+               pr_warn("Invalid SBI suspend state %#x\n", *state);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
+                                    struct sbi_cpuidle_data *data,
+                                    unsigned int state_count, int cpu)
+{
+       /* Currently limit the hierarchical topology to be used in OSI mode. */
+       if (!sbi_cpuidle_use_osi)
+               return 0;
+
+       data->dev = dt_idle_attach_cpu(cpu, "sbi");
+       if (IS_ERR_OR_NULL(data->dev))
+               return PTR_ERR_OR_ZERO(data->dev);
+
+       /*
+        * Use the deepest state of the CPU to trigger a potential selection
+        * of a shared state for the domain; this assumes the domain states
+        * are all deeper than the CPU's own states.
+        */
+       drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
+       drv->states[state_count - 1].enter_s2idle =
+                                       sbi_enter_s2idle_domain_idle_state;
+       sbi_cpuidle_use_cpuhp = true;
+
+       return 0;
+}
+
+static int sbi_cpuidle_dt_init_states(struct device *dev,
+                                       struct cpuidle_driver *drv,
+                                       unsigned int cpu,
+                                       unsigned int state_count)
+{
+       struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
+       struct device_node *state_node;
+       struct device_node *cpu_node;
+       u32 *states;
+       int i, ret;
+
+       cpu_node = of_cpu_device_node_get(cpu);
+       if (!cpu_node)
+               return -ENODEV;
+
+       states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
+       if (!states) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       /* Parse SBI specific details from state DT nodes */
+       for (i = 1; i < state_count; i++) {
+               state_node = of_get_cpu_state_node(cpu_node, i - 1);
+               if (!state_node)
+                       break;
+
+               ret = sbi_dt_parse_state_node(state_node, &states[i]);
+               of_node_put(state_node);
+
+               if (ret)
+                       goto fail;
+
+               pr_debug("sbi-state %#x index %d\n", states[i], i);
+       }
+       if (i != state_count) {
+               ret = -ENODEV;
+               goto fail;
+       }
+
+       /* Initialize optional data, used for the hierarchical topology. */
+       ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
+       if (ret < 0)
+               goto fail;
+
+       /* Store states in the per-cpu struct. */
+       data->states = states;
+
+fail:
+       of_node_put(cpu_node);
+
+       return ret;
+}
+
+static void sbi_cpuidle_deinit_cpu(int cpu)
+{
+       struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
+
+       dt_idle_detach_cpu(data->dev);
+       sbi_cpuidle_use_cpuhp = false;
+}
+
+static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
+{
+       struct cpuidle_driver *drv;
+       unsigned int state_count = 0;
+       int ret = 0;
+
+       drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+       if (!drv)
+               return -ENOMEM;
+
+       drv->name = "sbi_cpuidle";
+       drv->owner = THIS_MODULE;
+       drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+       /* State index 0 represents the architectural RISC-V WFI. */
+       drv->states[0].enter = sbi_cpuidle_enter_state;
+       drv->states[0].exit_latency = 1;
+       drv->states[0].target_residency = 1;
+       drv->states[0].power_usage = UINT_MAX;
+       strcpy(drv->states[0].name, "WFI");
+       strcpy(drv->states[0].desc, "RISC-V WFI");
+
+       /*
+        * If no DT idle states are detected (ret == 0), let the driver
+        * initialization fail accordingly, since there is no reason to
+        * initialize the idle driver if only WFI is supported: the
+        * default architectural back-end already executes WFI on idle
+        * entry.
+        */
+       ret = dt_init_idle_driver(drv, sbi_cpuidle_state_match, 1);
+       if (ret <= 0) {
+               pr_debug("HART%ld: failed to parse DT idle states\n",
+                        cpuid_to_hartid_map(cpu));
+               return ret ? : -ENODEV;
+       }
+       state_count = ret + 1; /* Include WFI state as well */
+
+       /* Initialize idle states from DT. */
+       ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count);
+       if (ret) {
+               pr_err("HART%ld: failed to init idle states\n",
+                      cpuid_to_hartid_map(cpu));
+               return ret;
+       }
+
+       ret = cpuidle_register(drv, NULL);
+       if (ret)
+               goto deinit;
+
+       cpuidle_cooling_register(drv);
+
+       return 0;
+deinit:
+       sbi_cpuidle_deinit_cpu(cpu);
+       return ret;
+}
+
+static void sbi_cpuidle_domain_sync_state(struct device *dev)
+{
+       /*
+        * All devices have now been attached/probed to the PM domain
+        * topology, hence it's fine to allow domain states to be picked.
+        */
+       sbi_cpuidle_pd_allow_domain_state = true;
+}
+
+#ifdef CONFIG_DT_IDLE_GENPD
+
+static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
+{
+       struct genpd_power_state *state = &pd->states[pd->state_idx];
+       u32 *pd_state;
+
+       if (!state->data)
+               return 0;
+
+       if (!sbi_cpuidle_pd_allow_domain_state)
+               return -EBUSY;
+
+       /* OSI mode is enabled, set the corresponding domain state. */
+       pd_state = state->data;
+       sbi_set_domain_state(*pd_state);
+
+       return 0;
+}
+
+struct sbi_pd_provider {
+       struct list_head link;
+       struct device_node *node;
+};
+
+static LIST_HEAD(sbi_pd_providers);
+
+static int sbi_pd_init(struct device_node *np)
+{
+       struct generic_pm_domain *pd;
+       struct sbi_pd_provider *pd_provider;
+       struct dev_power_governor *pd_gov;
+       int ret = -ENOMEM, state_count = 0;
+
+       pd = dt_idle_pd_alloc(np, sbi_dt_parse_state_node);
+       if (!pd)
+               goto out;
+
+       pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
+       if (!pd_provider)
+               goto free_pd;
+
+       pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
+
+       /* Allow power off when OSI is available. */
+       if (sbi_cpuidle_use_osi)
+               pd->power_off = sbi_cpuidle_pd_power_off;
+       else
+               pd->flags |= GENPD_FLAG_ALWAYS_ON;
+
+       /* Use governor for CPU PM domains if it has some states to manage. */
+       pd_gov = state_count > 0 ? &pm_domain_cpu_gov : NULL;
+
+       ret = pm_genpd_init(pd, pd_gov, false);
+       if (ret)
+               goto free_pd_prov;
+
+       ret = of_genpd_add_provider_simple(np, pd);
+       if (ret)
+               goto remove_pd;
+
+       pd_provider->node = of_node_get(np);
+       list_add(&pd_provider->link, &sbi_pd_providers);
+
+       pr_debug("init PM domain %s\n", pd->name);
+       return 0;
+
+remove_pd:
+       pm_genpd_remove(pd);
+free_pd_prov:
+       kfree(pd_provider);
+free_pd:
+       dt_idle_pd_free(pd);
+out:
+       pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
+       return ret;
+}
+
+static void sbi_pd_remove(void)
+{
+       struct sbi_pd_provider *pd_provider, *it;
+       struct generic_pm_domain *genpd;
+
+       list_for_each_entry_safe(pd_provider, it, &sbi_pd_providers, link) {
+               of_genpd_del_provider(pd_provider->node);
+
+               genpd = of_genpd_remove_last(pd_provider->node);
+               if (!IS_ERR(genpd))
+                       kfree(genpd);
+
+               of_node_put(pd_provider->node);
+               list_del(&pd_provider->link);
+               kfree(pd_provider);
+       }
+}
+
+static int sbi_genpd_probe(struct device_node *np)
+{
+       struct device_node *node;
+       int ret = 0, pd_count = 0;
+
+       if (!np)
+               return -ENODEV;
+
+       /*
+        * Parse child nodes for the "#power-domain-cells" property and
+        * initialize a genpd/genpd-of-provider pair when it's found.
+        */
+       for_each_child_of_node(np, node) {
+               if (!of_find_property(node, "#power-domain-cells", NULL))
+                       continue;
+
+               ret = sbi_pd_init(node);
+               if (ret)
+                       goto put_node;
+
+               pd_count++;
+       }
+
+       /* Bail out if not using the hierarchical CPU topology. */
+       if (!pd_count)
+               goto no_pd;
+
+       /* Link genpd masters/subdomains to model the CPU topology. */
+       ret = dt_idle_pd_init_topology(np);
+       if (ret)
+               goto remove_pd;
+
+       return 0;
+
+put_node:
+       of_node_put(node);
+remove_pd:
+       sbi_pd_remove();
+       pr_err("failed to create CPU PM domains ret=%d\n", ret);
+no_pd:
+       return ret;
+}
+
+#else
+
+static inline int sbi_genpd_probe(struct device_node *np)
+{
+       return 0;
+}
+
+#endif
+
+static int sbi_cpuidle_probe(struct platform_device *pdev)
+{
+       int cpu, ret;
+       struct cpuidle_driver *drv;
+       struct cpuidle_device *dev;
+       struct device_node *np, *pds_node;
+
+       /* Detect OSI support based on CPU DT nodes */
+       sbi_cpuidle_use_osi = true;
+       for_each_possible_cpu(cpu) {
+               np = of_cpu_device_node_get(cpu);
+               if (!np ||
+                   !of_find_property(np, "power-domains", NULL) ||
+                   !of_find_property(np, "power-domain-names", NULL)) {
+                       of_node_put(np);
+                       sbi_cpuidle_use_osi = false;
+                       break;
+               }
+               of_node_put(np);
+       }
+
+       /* Populate generic power domains from DT nodes */
+       pds_node = of_find_node_by_path("/cpus/power-domains");
+       if (pds_node) {
+               ret = sbi_genpd_probe(pds_node);
+               of_node_put(pds_node);
+               if (ret)
+                       return ret;
+       }
+
+       /* Initialize CPU idle driver for each CPU */
+       for_each_possible_cpu(cpu) {
+               ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
+               if (ret) {
+                       pr_debug("HART%ld: idle driver init failed\n",
+                                cpuid_to_hartid_map(cpu));
+                       goto out_fail;
+               }
+       }
+
+       /* Set up CPU hotplug notifiers */
+       sbi_idle_init_cpuhp();
+
+       pr_info("idle driver registered for all CPUs\n");
+
+       return 0;
+
+out_fail:
+       while (--cpu >= 0) {
+               dev = per_cpu(cpuidle_devices, cpu);
+               drv = cpuidle_get_cpu_driver(dev);
+               cpuidle_unregister(drv);
+               sbi_cpuidle_deinit_cpu(cpu);
+       }
+
+       return ret;
+}
+
+static struct platform_driver sbi_cpuidle_driver = {
+       .probe = sbi_cpuidle_probe,
+       .driver = {
+               .name = "sbi-cpuidle",
+               .sync_state = sbi_cpuidle_domain_sync_state,
+       },
+};
+
+static int __init sbi_cpuidle_init(void)
+{
+       int ret;
+       struct platform_device *pdev;
+
+       /*
+        * The SBI HSM suspend function is only available when:
+        * 1) SBI version is 0.3 or higher
+        * 2) SBI HSM extension is available
+        */
+       if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
+           sbi_probe_extension(SBI_EXT_HSM) <= 0) {
+               pr_info("HSM suspend not available\n");
+               return 0;
+       }
+
+       ret = platform_driver_register(&sbi_cpuidle_driver);
+       if (ret)
+               return ret;
+
+       pdev = platform_device_register_simple("sbi-cpuidle",
+                                               -1, NULL, 0);
+       if (IS_ERR(pdev)) {
+               platform_driver_unregister(&sbi_cpuidle_driver);
+               return PTR_ERR(pdev);
+       }
+
+       return 0;
+}
+device_initcall(sbi_cpuidle_init);
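
For state index 0 (plain WFI) and the deeper states alike, the driver's
sbi_cpuidle_enter_state() relies on CPU_PM_CPU_IDLE_ENTER_PARAM(), which skips
the CPU PM notifications for index 0 and brackets the firmware call with them
otherwise. A simplified sketch of roughly what
CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, states[idx]) expands to; see
include/linux/cpuidle.h for the authoritative definition, which this only
approximates:

	({
		int __ret = 0;

		if (!idx) {
			cpu_do_idle();			/* index 0: architectural WFI */
			return idx;
		}

		__ret = cpu_pm_enter();			/* notify CPU PM listeners */
		if (!__ret) {
			__ret = sbi_suspend(states[idx]);
			cpu_pm_exit();
		}

		__ret ? -1 : idx;			/* cpuidle reports failure as -1 */
	})
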
diff --git a/drivers/cpuidle/dt_idle_genpd.c b/drivers/cpuidle/dt_idle_genpd.c
new file mode 100644 (file)
index 0000000..b371655
--- /dev/null
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PM domains for CPUs via genpd.
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "dt-idle-genpd: " fmt
+
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "dt_idle_genpd.h"
+
+static int pd_parse_state_nodes(
+                       int (*parse_state)(struct device_node *, u32 *),
+                       struct genpd_power_state *states, int state_count)
+{
+       int i, ret;
+       u32 state, *state_buf;
+
+       for (i = 0; i < state_count; i++) {
+               ret = parse_state(to_of_node(states[i].fwnode), &state);
+               if (ret)
+                       goto free_state;
+
+               state_buf = kmalloc(sizeof(u32), GFP_KERNEL);
+               if (!state_buf) {
+                       ret = -ENOMEM;
+                       goto free_state;
+               }
+               *state_buf = state;
+               states[i].data = state_buf;
+       }
+
+       return 0;
+
+free_state:
+       i--;
+       for (; i >= 0; i--)
+               kfree(states[i].data);
+       return ret;
+}
+
+static int pd_parse_states(struct device_node *np,
+                          int (*parse_state)(struct device_node *, u32 *),
+                          struct genpd_power_state **states,
+                          int *state_count)
+{
+       int ret;
+
+       /* Parse the domain idle states. */
+       ret = of_genpd_parse_idle_states(np, states, state_count);
+       if (ret)
+               return ret;
+
+       /* Fill out the dt specifics for each found state. */
+       ret = pd_parse_state_nodes(parse_state, *states, *state_count);
+       if (ret)
+               kfree(*states);
+
+       return ret;
+}
+
+static void pd_free_states(struct genpd_power_state *states,
+                           unsigned int state_count)
+{
+       int i;
+
+       for (i = 0; i < state_count; i++)
+               kfree(states[i].data);
+       kfree(states);
+}
+
+void dt_idle_pd_free(struct generic_pm_domain *pd)
+{
+       pd_free_states(pd->states, pd->state_count);
+       kfree(pd->name);
+       kfree(pd);
+}
+
+struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np,
+                       int (*parse_state)(struct device_node *, u32 *))
+{
+       struct generic_pm_domain *pd;
+       struct genpd_power_state *states = NULL;
+       int ret, state_count = 0;
+
+       pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+       if (!pd)
+               goto out;
+
+       pd->name = kasprintf(GFP_KERNEL, "%pOF", np);
+       if (!pd->name)
+               goto free_pd;
+
+       /*
+        * Parse the domain idle states and let genpd manage the state selection
+        * for those being compatible with "domain-idle-state".
+        */
+       ret = pd_parse_states(np, parse_state, &states, &state_count);
+       if (ret)
+               goto free_name;
+
+       pd->free_states = pd_free_states;
+       pd->name = kbasename(pd->name);
+       pd->states = states;
+       pd->state_count = state_count;
+
+       pr_debug("alloc PM domain %s\n", pd->name);
+       return pd;
+
+free_name:
+       kfree(pd->name);
+free_pd:
+       kfree(pd);
+out:
+       pr_err("failed to alloc PM domain %pOF\n", np);
+       return NULL;
+}
+
+int dt_idle_pd_init_topology(struct device_node *np)
+{
+       struct device_node *node;
+       struct of_phandle_args child, parent;
+       int ret;
+
+       for_each_child_of_node(np, node) {
+               if (of_parse_phandle_with_args(node, "power-domains",
+                                       "#power-domain-cells", 0, &parent))
+                       continue;
+
+               child.np = node;
+               child.args_count = 0;
+               ret = of_genpd_add_subdomain(&parent, &child);
+               of_node_put(parent.np);
+               if (ret) {
+                       of_node_put(node);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+struct device *dt_idle_attach_cpu(int cpu, const char *name)
+{
+       struct device *dev;
+
+       dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), name);
+       if (IS_ERR_OR_NULL(dev))
+               return dev;
+
+       pm_runtime_irq_safe(dev);
+       if (cpu_online(cpu))
+               pm_runtime_get_sync(dev);
+
+       dev_pm_syscore_device(dev, true);
+
+       return dev;
+}
+
+void dt_idle_detach_cpu(struct device *dev)
+{
+       if (IS_ERR_OR_NULL(dev))
+               return;
+
+       dev_pm_domain_detach(dev, false);
+}
diff --git a/drivers/cpuidle/dt_idle_genpd.h b/drivers/cpuidle/dt_idle_genpd.h
new file mode 100644 (file)
index 0000000..a95483d
--- /dev/null
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_IDLE_GENPD
+#define __DT_IDLE_GENPD
+
+struct device_node;
+struct generic_pm_domain;
+
+#ifdef CONFIG_DT_IDLE_GENPD
+
+void dt_idle_pd_free(struct generic_pm_domain *pd);
+
+struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np,
+                       int (*parse_state)(struct device_node *, u32 *));
+
+int dt_idle_pd_init_topology(struct device_node *np);
+
+struct device *dt_idle_attach_cpu(int cpu, const char *name);
+
+void dt_idle_detach_cpu(struct device *dev);
+
+#else
+
+static inline void dt_idle_pd_free(struct generic_pm_domain *pd)
+{
+}
+
+static inline struct generic_pm_domain *dt_idle_pd_alloc(
+                       struct device_node *np,
+                       int (*parse_state)(struct device_node *, u32 *))
+{
+       return NULL;
+}
+
+static inline int dt_idle_pd_init_topology(struct device_node *np)
+{
+       return 0;
+}
+
+static inline struct device *dt_idle_attach_cpu(int cpu, const char *name)
+{
+       return NULL;
+}
+
+static inline void dt_idle_detach_cpu(struct device *dev)
+{
+}
+
+#endif
+
+#endif
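
Because the !CONFIG_DT_IDLE_GENPD stubs above return benign values, callers
need no #ifdef guards of their own. For orientation, a condensed sketch of the
call sequence a back-end performs with these helpers, mirroring the PSCI and
SBI drivers above; error unwinding is elided, and my_parse_state_node,
parent_np, and the "my-pd" name are placeholders:

	/* Once per domain node: */
	pd = dt_idle_pd_alloc(np, my_parse_state_node);
	pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
	pm_genpd_init(pd, NULL, false);
	of_genpd_add_provider_simple(np, pd);

	/* Once, to link parent/child domains from "power-domains" phandles: */
	dt_idle_pd_init_topology(parent_np);

	/* Per CPU, named after an entry in the CPU node's power-domain-names: */
	dev = dt_idle_attach_cpu(cpu, "my-pd");
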
index b894e3a8be4fa8ab60e1f5bf5702fe870ee8e90a..5f8915f4a9ffe399bbddefca93341ef4190f9fe5 100644 (file)
@@ -3,8 +3,11 @@ config CRYPTO_DEV_VIRTIO
        tristate "VirtIO crypto driver"
        depends on VIRTIO
        select CRYPTO_AEAD
+       select CRYPTO_AKCIPHER2
        select CRYPTO_SKCIPHER
        select CRYPTO_ENGINE
+       select CRYPTO_RSA
+       select MPILIB
        help
          This driver provides support for virtio crypto device. If you
          choose 'M' here, this module will be called virtio_crypto.
index cbfccccfa135d63367739c621686ceb2958ddb83..bfa6cbae342e0fa8d52758e24b7317b841b13542 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio_crypto.o
 virtio_crypto-objs := \
-       virtio_crypto_algs.o \
+       virtio_crypto_skcipher_algs.o \
+       virtio_crypto_akcipher_algs.o \
        virtio_crypto_mgr.o \
        virtio_crypto_core.o
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
new file mode 100644 (file)
index 0000000..f3ec942
--- /dev/null
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Asymmetric algorithms supported by virtio crypto device
+ *
+ * Authors: zhenwei pi <pizhenwei@bytedance.com>
+ *          lei he <helei.sig11@bytedance.com>
+ *
+ * Copyright 2022 Bytedance CO., LTD.
+ */
+
+#include <linux/mpi.h>
+#include <linux/scatterlist.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/rsa.h>
+#include <linux/err.h>
+#include <crypto/scatterwalk.h>
+#include <linux/atomic.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+struct virtio_crypto_rsa_ctx {
+       MPI n;
+};
+
+struct virtio_crypto_akcipher_ctx {
+       struct crypto_engine_ctx enginectx;
+       struct virtio_crypto *vcrypto;
+       struct crypto_akcipher *tfm;
+       bool session_valid;
+       __u64 session_id;
+       union {
+               struct virtio_crypto_rsa_ctx rsa_ctx;
+       };
+};
+
+struct virtio_crypto_akcipher_request {
+       struct virtio_crypto_request base;
+       struct virtio_crypto_akcipher_ctx *akcipher_ctx;
+       struct akcipher_request *akcipher_req;
+       void *src_buf;
+       void *dst_buf;
+       uint32_t opcode;
+};
+
+struct virtio_crypto_akcipher_algo {
+       uint32_t algonum;
+       uint32_t service;
+       unsigned int active_devs;
+       struct akcipher_alg algo;
+};
+
+static DEFINE_MUTEX(algs_lock);
+
+static void virtio_crypto_akcipher_finalize_req(
+       struct virtio_crypto_akcipher_request *vc_akcipher_req,
+       struct akcipher_request *req, int err)
+{
+       virtcrypto_clear_request(&vc_akcipher_req->base);
+
+       crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err);
+}
+
+static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *vc_req, int len)
+{
+       struct virtio_crypto_akcipher_request *vc_akcipher_req =
+               container_of(vc_req, struct virtio_crypto_akcipher_request, base);
+       struct akcipher_request *akcipher_req;
+       int error;
+
+       switch (vc_req->status) {
+       case VIRTIO_CRYPTO_OK:
+               error = 0;
+               break;
+       case VIRTIO_CRYPTO_INVSESS:
+       case VIRTIO_CRYPTO_ERR:
+               error = -EINVAL;
+               break;
+       case VIRTIO_CRYPTO_BADMSG:
+               error = -EBADMSG;
+               break;
+
+       case VIRTIO_CRYPTO_KEY_REJECTED:
+               error = -EKEYREJECTED;
+               break;
+
+       default:
+               error = -EIO;
+               break;
+       }
+
+       akcipher_req = vc_akcipher_req->akcipher_req;
+       if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY)
+               sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
+                                   vc_akcipher_req->dst_buf, akcipher_req->dst_len);
+       virtio_crypto_akcipher_finalize_req(vc_akcipher_req, akcipher_req, error);
+}
+
+static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
+               struct virtio_crypto_ctrl_header *header, void *para,
+               const uint8_t *key, unsigned int keylen)
+{
+       struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       uint8_t *pkey;
+       unsigned int inlen;
+       int err;
+       unsigned int num_out = 0, num_in = 0;
+
+       pkey = kmemdup(key, keylen, GFP_ATOMIC);
+       if (!pkey)
+               return -ENOMEM;
+
+       spin_lock(&vcrypto->ctrl_lock);
+       memcpy(&vcrypto->ctrl.header, header, sizeof(vcrypto->ctrl.header));
+       memcpy(&vcrypto->ctrl.u, para, sizeof(vcrypto->ctrl.u));
+       vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+
+       sg_init_one(&outhdr_sg, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+       sgs[num_out++] = &outhdr_sg;
+
+       sg_init_one(&key_sg, pkey, keylen);
+       sgs[num_out++] = &key_sg;
+
+       sg_init_one(&inhdr_sg, &vcrypto->input, sizeof(vcrypto->input));
+       sgs[num_out + num_in++] = &inhdr_sg;
+
+       err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out, num_in, vcrypto, GFP_ATOMIC);
+       if (err < 0)
+               goto out;
+
+       virtqueue_kick(vcrypto->ctrl_vq);
+       while (!virtqueue_get_buf(vcrypto->ctrl_vq, &inlen) &&
+              !virtqueue_is_broken(vcrypto->ctrl_vq))
+               cpu_relax();
+
+       if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       ctx->session_id = le64_to_cpu(vcrypto->input.session_id);
+       ctx->session_valid = true;
+       err = 0;
+
+out:
+       spin_unlock(&vcrypto->ctrl_lock);
+       kfree_sensitive(pkey);
+
+       if (err < 0)
+               pr_err("virtio_crypto: Create session failed status: %u\n",
+                       le32_to_cpu(vcrypto->input.status));
+
+       return err;
+}
+
+static int virtio_crypto_alg_akcipher_close_session(struct virtio_crypto_akcipher_ctx *ctx)
+{
+       struct scatterlist outhdr_sg, inhdr_sg, *sgs[2];
+       struct virtio_crypto_destroy_session_req *destroy_session;
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       unsigned int num_out = 0, num_in = 0, inlen;
+       int err;
+
+       spin_lock(&vcrypto->ctrl_lock);
+       if (!ctx->session_valid) {
+               err = 0;
+               goto out;
+       }
+       vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
+       vcrypto->ctrl.header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION);
+       vcrypto->ctrl.header.queue_id = 0;
+
+       destroy_session = &vcrypto->ctrl.u.destroy_session;
+       destroy_session->session_id = cpu_to_le64(ctx->session_id);
+
+       sg_init_one(&outhdr_sg, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+       sgs[num_out++] = &outhdr_sg;
+
+       sg_init_one(&inhdr_sg, &vcrypto->ctrl_status.status, sizeof(vcrypto->ctrl_status.status));
+       sgs[num_out + num_in++] = &inhdr_sg;
+
+       err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out, num_in, vcrypto, GFP_ATOMIC);
+       if (err < 0)
+               goto out;
+
+       virtqueue_kick(vcrypto->ctrl_vq);
+       while (!virtqueue_get_buf(vcrypto->ctrl_vq, &inlen) &&
+              !virtqueue_is_broken(vcrypto->ctrl_vq))
+               cpu_relax();
+
+       if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       err = 0;
+       ctx->session_valid = false;
+
+out:
+       spin_unlock(&vcrypto->ctrl_lock);
+       if (err < 0) {
+               pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
+                       vcrypto->ctrl_status.status, destroy_session->session_id);
+       }
+
+       return err;
+}
+
+static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request *vc_akcipher_req,
+               struct akcipher_request *req, struct data_queue *data_vq)
+{
+       struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
+       struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       struct virtio_crypto_op_data_req *req_data = vc_req->req_data;
+       struct scatterlist *sgs[4], outhdr_sg, inhdr_sg, srcdata_sg, dstdata_sg;
+       void *src_buf = NULL, *dst_buf = NULL;
+       unsigned int num_out = 0, num_in = 0;
+       int node = dev_to_node(&vcrypto->vdev->dev);
+       unsigned long flags;
+       int ret = -ENOMEM;
+       bool verify = vc_akcipher_req->opcode == VIRTIO_CRYPTO_AKCIPHER_VERIFY;
+       unsigned int src_len = verify ? req->src_len + req->dst_len : req->src_len;
+
+       /* out header */
+       sg_init_one(&outhdr_sg, req_data, sizeof(*req_data));
+       sgs[num_out++] = &outhdr_sg;
+
+       /* src data */
+       src_buf = kcalloc_node(src_len, 1, GFP_KERNEL, node);
+       if (!src_buf)
+               goto err;
+
+       if (verify) {
+               /* for verify operation, both src and dst data work as OUT direction */
+               sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
+               sg_init_one(&srcdata_sg, src_buf, src_len);
+               sgs[num_out++] = &srcdata_sg;
+       } else {
+               sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
+               sg_init_one(&srcdata_sg, src_buf, src_len);
+               sgs[num_out++] = &srcdata_sg;
+
+               /* dst data */
+               dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
+               if (!dst_buf)
+                       goto err;
+
+               sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
+               sgs[num_out + num_in++] = &dstdata_sg;
+       }
+
+       vc_akcipher_req->src_buf = src_buf;
+       vc_akcipher_req->dst_buf = dst_buf;
+
+       /* in header */
+       sg_init_one(&inhdr_sg, &vc_req->status, sizeof(vc_req->status));
+       sgs[num_out + num_in++] = &inhdr_sg;
+
+       spin_lock_irqsave(&data_vq->lock, flags);
+       ret = virtqueue_add_sgs(data_vq->vq, sgs, num_out, num_in, vc_req, GFP_ATOMIC);
+       virtqueue_kick(data_vq->vq);
+       spin_unlock_irqrestore(&data_vq->lock, flags);
+       if (ret)
+               goto err;
+
+       return 0;
+
+err:
+       kfree(src_buf);
+       kfree(dst_buf);
+
+       return ret;
+}
+
+static int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)
+{
+       struct akcipher_request *req = container_of(vreq, struct akcipher_request, base);
+       struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
+       struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
+       struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       struct data_queue *data_vq = vc_req->dataq;
+       struct virtio_crypto_op_header *header;
+       struct virtio_crypto_akcipher_data_req *akcipher_req;
+       int ret;
+
+       vc_req->sgs = NULL;
+       vc_req->req_data = kzalloc_node(sizeof(*vc_req->req_data),
+               GFP_KERNEL, dev_to_node(&vcrypto->vdev->dev));
+       if (!vc_req->req_data)
+               return -ENOMEM;
+
+       /* build request header */
+       header = &vc_req->req_data->header;
+       header->opcode = cpu_to_le32(vc_akcipher_req->opcode);
+       header->algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
+       header->session_id = cpu_to_le64(ctx->session_id);
+
+       /* build request akcipher data */
+       akcipher_req = &vc_req->req_data->u.akcipher_req;
+       akcipher_req->para.src_data_len = cpu_to_le32(req->src_len);
+       akcipher_req->para.dst_data_len = cpu_to_le32(req->dst_len);
+
+       ret = __virtio_crypto_akcipher_do_req(vc_akcipher_req, req, data_vq);
+       if (ret < 0) {
+               kfree_sensitive(vc_req->req_data);
+               vc_req->req_data = NULL;
+               return ret;
+       }
+
+       return 0;
+}
+
+static int virtio_crypto_rsa_req(struct akcipher_request *req, uint32_t opcode)
+{
+       struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req);
+       struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm);
+       struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
+       struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
+       struct virtio_crypto *vcrypto = ctx->vcrypto;
+       /* Use the first data virtqueue as default */
+       struct data_queue *data_vq = &vcrypto->data_vq[0];
+
+       vc_req->dataq = data_vq;
+       vc_req->alg_cb = virtio_crypto_dataq_akcipher_callback;
+       vc_akcipher_req->akcipher_ctx = ctx;
+       vc_akcipher_req->akcipher_req = req;
+       vc_akcipher_req->opcode = opcode;
+
+       return crypto_transfer_akcipher_request_to_engine(data_vq->engine, req);
+}
+
+static int virtio_crypto_rsa_encrypt(struct akcipher_request *req)
+{
+       return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_ENCRYPT);
+}
+
+static int virtio_crypto_rsa_decrypt(struct akcipher_request *req)
+{
+       return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_DECRYPT);
+}
+
+static int virtio_crypto_rsa_sign(struct akcipher_request *req)
+{
+       return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_SIGN);
+}
+
+static int virtio_crypto_rsa_verify(struct akcipher_request *req)
+{
+       return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_VERIFY);
+}
+
+static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
+                                    const void *key,
+                                    unsigned int keylen,
+                                    bool private,
+                                    int padding_algo,
+                                    int hash_algo)
+{
+       struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
+       struct virtio_crypto *vcrypto;
+       struct virtio_crypto_ctrl_header header;
+       struct virtio_crypto_akcipher_session_para para;
+       struct rsa_key rsa_key = {0};
+       int node = virtio_crypto_get_current_node();
+       uint32_t keytype;
+       int ret;
+
+       /* mpi_free() tolerates NULL, so just free n unconditionally. */
+       mpi_free(rsa_ctx->n);
+       rsa_ctx->n = NULL;
+
+       if (private) {
+               keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
+               ret = rsa_parse_priv_key(&rsa_key, key, keylen);
+       } else {
+               keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC;
+               ret = rsa_parse_pub_key(&rsa_key, key, keylen);
+       }
+
+       if (ret)
+               return ret;
+
+       rsa_ctx->n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz);
+       if (!rsa_ctx->n)
+               return -ENOMEM;
+
+       if (!ctx->vcrypto) {
+               vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_AKCIPHER,
+                                               VIRTIO_CRYPTO_AKCIPHER_RSA);
+               if (!vcrypto) {
+                       pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
+                       return -ENODEV;
+               }
+
+               ctx->vcrypto = vcrypto;
+       } else {
+               virtio_crypto_alg_akcipher_close_session(ctx);
+       }
+
+       /* set ctrl header */
+       header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION);
+       header.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
+       header.queue_id = 0;
+
+       /* set RSA para */
+       para.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
+       para.keytype = cpu_to_le32(keytype);
+       para.keylen = cpu_to_le32(keylen);
+       para.u.rsa.padding_algo = cpu_to_le32(padding_algo);
+       para.u.rsa.hash_algo = cpu_to_le32(hash_algo);
+
+       return virtio_crypto_alg_akcipher_init_session(ctx, &header, &para, key, keylen);
+}
+
+static int virtio_crypto_rsa_raw_set_priv_key(struct crypto_akcipher *tfm,
+                                             const void *key,
+                                             unsigned int keylen)
+{
+       return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
+                                        VIRTIO_CRYPTO_RSA_RAW_PADDING,
+                                        VIRTIO_CRYPTO_RSA_NO_HASH);
+}
+
+static int virtio_crypto_p1pad_rsa_sha1_set_priv_key(struct crypto_akcipher *tfm,
+                                                    const void *key,
+                                                    unsigned int keylen)
+{
+       return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
+                                        VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
+                                        VIRTIO_CRYPTO_RSA_SHA1);
+}
+
+static int virtio_crypto_rsa_raw_set_pub_key(struct crypto_akcipher *tfm,
+                                            const void *key,
+                                            unsigned int keylen)
+{
+       return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
+                                        VIRTIO_CRYPTO_RSA_RAW_PADDING,
+                                        VIRTIO_CRYPTO_RSA_NO_HASH);
+}
+
+static int virtio_crypto_p1pad_rsa_sha1_set_pub_key(struct crypto_akcipher *tfm,
+                                                   const void *key,
+                                                   unsigned int keylen)
+{
+       return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
+                                        VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
+                                        VIRTIO_CRYPTO_RSA_SHA1);
+}
+
+static unsigned int virtio_crypto_rsa_max_size(struct crypto_akcipher *tfm)
+{
+       struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
+
+       return mpi_get_size(rsa_ctx->n);
+}
+
+static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+       struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+       ctx->tfm = tfm;
+       ctx->enginectx.op.do_one_request = virtio_crypto_rsa_do_req;
+       ctx->enginectx.op.prepare_request = NULL;
+       ctx->enginectx.op.unprepare_request = NULL;
+
+       return 0;
+}
+
+static void virtio_crypto_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+       struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
+
+       virtio_crypto_alg_akcipher_close_session(ctx);
+       virtcrypto_dev_put(ctx->vcrypto);
+       mpi_free(rsa_ctx->n);
+       rsa_ctx->n = NULL;
+}
+
+static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
+       {
+               .algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
+               .service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
+               .algo = {
+                       .encrypt = virtio_crypto_rsa_encrypt,
+                       .decrypt = virtio_crypto_rsa_decrypt,
+                       .set_pub_key = virtio_crypto_rsa_raw_set_pub_key,
+                       .set_priv_key = virtio_crypto_rsa_raw_set_priv_key,
+                       .max_size = virtio_crypto_rsa_max_size,
+                       .init = virtio_crypto_rsa_init_tfm,
+                       .exit = virtio_crypto_rsa_exit_tfm,
+                       .reqsize = sizeof(struct virtio_crypto_akcipher_request),
+                       .base = {
+                               .cra_name = "rsa",
+                               .cra_driver_name = "virtio-crypto-rsa",
+                               .cra_priority = 150,
+                               .cra_module = THIS_MODULE,
+                               .cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
+                       },
+               },
+       },
+       {
+               .algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
+               .service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
+               .algo = {
+                       .encrypt = virtio_crypto_rsa_encrypt,
+                       .decrypt = virtio_crypto_rsa_decrypt,
+                       .sign = virtio_crypto_rsa_sign,
+                       .verify = virtio_crypto_rsa_verify,
+                       .set_pub_key = virtio_crypto_p1pad_rsa_sha1_set_pub_key,
+                       .set_priv_key = virtio_crypto_p1pad_rsa_sha1_set_priv_key,
+                       .max_size = virtio_crypto_rsa_max_size,
+                       .init = virtio_crypto_rsa_init_tfm,
+                       .exit = virtio_crypto_rsa_exit_tfm,
+                       .reqsize = sizeof(struct virtio_crypto_akcipher_request),
+                       .base = {
+                               .cra_name = "pkcs1pad(rsa,sha1)",
+                               .cra_driver_name = "virtio-pkcs1-rsa-with-sha1",
+                               .cra_priority = 150,
+                               .cra_module = THIS_MODULE,
+                               .cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
+                       },
+               },
+       },
+};
+
+int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto)
+{
+       int ret = 0;
+       int i = 0;
+
+       mutex_lock(&algs_lock);
+
+       for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
+               uint32_t service = virtio_crypto_akcipher_algs[i].service;
+               uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;
+
+               if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
+                       continue;
+
+               if (virtio_crypto_akcipher_algs[i].active_devs == 0) {
+                       ret = crypto_register_akcipher(&virtio_crypto_akcipher_algs[i].algo);
+                       if (ret)
+                               goto unlock;
+               }
+
+               virtio_crypto_akcipher_algs[i].active_devs++;
+               dev_info(&vcrypto->vdev->dev, "Registered akcipher algo %s\n",
+                        virtio_crypto_akcipher_algs[i].algo.base.cra_name);
+       }
+
+unlock:
+       mutex_unlock(&algs_lock);
+       return ret;
+}
+
+void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto)
+{
+       int i = 0;
+
+       mutex_lock(&algs_lock);
+
+       for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
+               uint32_t service = virtio_crypto_akcipher_algs[i].service;
+               uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;
+
+               if (virtio_crypto_akcipher_algs[i].active_devs == 0 ||
+                   !virtcrypto_algo_is_supported(vcrypto, service, algonum))
+                       continue;
+
+               if (virtio_crypto_akcipher_algs[i].active_devs == 1)
+                       crypto_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo);
+
+               virtio_crypto_akcipher_algs[i].active_devs--;
+       }
+
+       mutex_unlock(&algs_lock);
+}
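
Once registered, these transforms are reachable through the regular kernel
akcipher API; the priority of 150 lets them outrank the generic software "rsa"
implementation when a capable virtio device is present. A hedged sketch of an
in-kernel caller follows, where the BER-encoded pub_key blob and the assumption
that dst is at least crypto_akcipher_maxsize() bytes are placeholders:

	#include <crypto/akcipher.h>
	#include <linux/scatterlist.h>

	static int example_rsa_encrypt(const u8 *pub_key, unsigned int keylen,
				       u8 *src, u8 *dst, unsigned int len)
	{
		struct crypto_akcipher *tfm;
		struct akcipher_request *req;
		struct scatterlist src_sg, dst_sg;
		DECLARE_CRYPTO_WAIT(wait);
		int ret;

		tfm = crypto_alloc_akcipher("rsa", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_akcipher_set_pub_key(tfm, pub_key, keylen);
		if (ret)
			goto free_tfm;

		req = akcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto free_tfm;
		}

		sg_init_one(&src_sg, src, len);
		sg_init_one(&dst_sg, dst, crypto_akcipher_maxsize(tfm));
		akcipher_request_set_crypt(req, &src_sg, &dst_sg, len,
					   crypto_akcipher_maxsize(tfm));
		akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					      crypto_req_done, &wait);

		/* Wait for the engine to run the request through the device. */
		ret = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);

		akcipher_request_free(req);
	free_tfm:
		crypto_free_akcipher(tfm);
		return ret;
	}
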
index a24f85c589e7e3035684e0288bbec9a7046a4869..e693d4ee83a636539f13f84edad068eb604a1ffa 100644 (file)
@@ -56,6 +56,7 @@ struct virtio_crypto {
        u32 mac_algo_l;
        u32 mac_algo_h;
        u32 aead_algo;
+       u32 akcipher_algo;
 
        /* Maximum length of cipher key */
        u32 max_cipher_key_len;
@@ -129,7 +130,9 @@ static inline int virtio_crypto_get_current_node(void)
        return node;
 }
 
-int virtio_crypto_algs_register(struct virtio_crypto *vcrypto);
-void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto);
+int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto);
+void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto);
+int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto);
+void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto);
 
 #endif /* _VIRTIO_CRYPTO_COMMON_H */
index 8e977b7627cb74879f1c3630b20abd96af0d3451..c6f482db0bc08608c039533d148957107cfe50bd 100644 (file)
@@ -297,6 +297,7 @@ static int virtcrypto_probe(struct virtio_device *vdev)
        u32 mac_algo_l = 0;
        u32 mac_algo_h = 0;
        u32 aead_algo = 0;
+       u32 akcipher_algo = 0;
        u32 crypto_services = 0;
 
        if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
@@ -348,6 +349,9 @@ static int virtcrypto_probe(struct virtio_device *vdev)
                        mac_algo_h, &mac_algo_h);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        aead_algo, &aead_algo);
+       if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER))
+               virtio_cread_le(vdev, struct virtio_crypto_config,
+                               akcipher_algo, &akcipher_algo);
 
        /* Add virtio crypto device to global table */
        err = virtcrypto_devmgr_add_dev(vcrypto);
@@ -374,7 +378,7 @@ static int virtcrypto_probe(struct virtio_device *vdev)
        vcrypto->mac_algo_h = mac_algo_h;
        vcrypto->hash_algo = hash_algo;
        vcrypto->aead_algo = aead_algo;
-
+       vcrypto->akcipher_algo = akcipher_algo;
 
        dev_info(&vdev->dev,
                "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
index 6860f8180c7c14f6e15bd326a16deabb4cc3d430..70e778aac0f2c917d581f3fa975339c1e7181ee0 100644 (file)
@@ -237,8 +237,14 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node, uint32_t service,
  */
 int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
 {
-       if (virtio_crypto_algs_register(vcrypto)) {
-               pr_err("virtio_crypto: Failed to register crypto algs\n");
+       if (virtio_crypto_skcipher_algs_register(vcrypto)) {
+               pr_err("virtio_crypto: Failed to register crypto skcipher algs\n");
+               return -EFAULT;
+       }
+
+       if (virtio_crypto_akcipher_algs_register(vcrypto)) {
+               pr_err("virtio_crypto: Failed to register crypto akcipher algs\n");
+               virtio_crypto_skcipher_algs_unregister(vcrypto);
                return -EFAULT;
        }
 
@@ -257,7 +263,8 @@ int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
  */
 void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
 {
-       virtio_crypto_algs_unregister(vcrypto);
+       virtio_crypto_skcipher_algs_unregister(vcrypto);
+       virtio_crypto_akcipher_algs_unregister(vcrypto);
 }
 
 /*
@@ -312,6 +319,10 @@ bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto,
        case VIRTIO_CRYPTO_SERVICE_AEAD:
                algo_mask = vcrypto->aead_algo;
                break;
+
+       case VIRTIO_CRYPTO_SERVICE_AKCIPHER:
+               algo_mask = vcrypto->akcipher_algo;
+               break;
        }
 
        if (!(algo_mask & (1u << algo)))
similarity index 99%
rename from drivers/crypto/virtio/virtio_crypto_algs.c
rename to drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
index 583c0b535d13b33a044a984e09658dcd1e3574d3..a618c46a52b8ac4c80004d808a0d3cdab1f3d366 100644 (file)
@@ -613,7 +613,7 @@ static struct virtio_crypto_algo virtio_crypto_algs[] = { {
        },
 } };
 
-int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
+int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto)
 {
        int ret = 0;
        int i = 0;
@@ -644,7 +644,7 @@ unlock:
        return ret;
 }
 
-void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
+void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto)
 {
        int i = 0;
 
index 8a7267d116b7574262a54c7542605d1b2120bcdd..3f2182d6682929681b27adb2cbbcec313d7e6f21 100644 (file)
@@ -436,7 +436,6 @@ static int wait_for_media_ready(struct cxl_dev_state *cxlds)
 
        for (i = mbox_ready_timeout; i; i--) {
                u32 temp;
-               int rc;
 
                rc = pci_read_config_dword(
                        pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp);
index 511805dbeb75f785d153b07fe37c73b6ec26ed8e..4c9eb53ba3f896f2b3b7dee06b938e9c04f09053 100644 (file)
@@ -12,6 +12,7 @@ dmabuf_selftests-y := \
        selftest.o \
        st-dma-fence.o \
        st-dma-fence-chain.o \
+       st-dma-fence-unwrap.o \
        st-dma-resv.o
 
 obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o
index cb1bacb5a42b543599b35a670f023e7b777c7e41..5c8a7084577b547799890b8b75905d27e8622f96 100644 (file)
@@ -159,6 +159,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
        struct dma_fence_array *array;
        size_t size = sizeof(*array);
 
+       WARN_ON(!num_fences || !fences);
+
        /* Allocate the callback structures behind the array. */
        size += num_fences * sizeof(struct dma_fence_array_cb);
        array = kzalloc(size, GFP_KERNEL);
@@ -219,3 +221,33 @@ bool dma_fence_match_context(struct dma_fence *fence, u64 context)
        return true;
 }
 EXPORT_SYMBOL(dma_fence_match_context);
+
+struct dma_fence *dma_fence_array_first(struct dma_fence *head)
+{
+       struct dma_fence_array *array;
+
+       if (!head)
+               return NULL;
+
+       array = to_dma_fence_array(head);
+       if (!array)
+               return head;
+
+       if (!array->num_fences)
+               return NULL;
+
+       return array->fences[0];
+}
+EXPORT_SYMBOL(dma_fence_array_first);
+
+struct dma_fence *dma_fence_array_next(struct dma_fence *head,
+                                      unsigned int index)
+{
+       struct dma_fence_array *array = to_dma_fence_array(head);
+
+       if (!array || index >= array->num_fences)
+               return NULL;
+
+       return array->fences[index];
+}
+EXPORT_SYMBOL(dma_fence_array_next);
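
[ Illustration only, not part of the patch: the two helpers compose into
  a flat walk over either a plain fence or a fence array, as can be read
  off the code above. "head" and the visit() callback are hypothetical:

	struct dma_fence *f;
	unsigned int i = 0;

	for (f = dma_fence_array_first(head); f;
	     f = dma_fence_array_next(head, ++i))
		visit(f);
]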
index 97d73aaa31daa1d72a96222cac7c7a5d177453b0..851965867d9c7f2251f901d499133f531981a92b 100644 (file)
@@ -12,4 +12,5 @@
 selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
 selftest(dma_fence, dma_fence)
 selftest(dma_fence_chain, dma_fence_chain)
+selftest(dma_fence_unwrap, dma_fence_unwrap)
 selftest(dma_resv, dma_resv)
diff --git a/drivers/dma-buf/st-dma-fence-unwrap.c b/drivers/dma-buf/st-dma-fence-unwrap.c
new file mode 100644 (file)
index 0000000..039f016
--- /dev/null
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/dma-fence-unwrap.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "selftest.h"
+
+#define CHAIN_SZ (4 << 10)
+
+static inline struct mock_fence {
+       struct dma_fence base;
+       spinlock_t lock;
+} *to_mock_fence(struct dma_fence *f) {
+       return container_of(f, struct mock_fence, base);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+       return "mock";
+}
+
+static const struct dma_fence_ops mock_ops = {
+       .get_driver_name = mock_name,
+       .get_timeline_name = mock_name,
+};
+
+static struct dma_fence *mock_fence(void)
+{
+       struct mock_fence *f;
+
+       f = kmalloc(sizeof(*f), GFP_KERNEL);
+       if (!f)
+               return NULL;
+
+       spin_lock_init(&f->lock);
+       dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
+
+       return &f->base;
+}
+
+static struct dma_fence *mock_array(unsigned int num_fences, ...)
+{
+       struct dma_fence_array *array;
+       struct dma_fence **fences;
+       va_list valist;
+       int i;
+
+       fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
+       if (!fences)
+               return NULL;
+
+       va_start(valist, num_fences);
+       for (i = 0; i < num_fences; ++i)
+               fences[i] = va_arg(valist, typeof(*fences));
+       va_end(valist);
+
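+       /*
+        * On success dma_fence_array_create() takes over both the fences[]
+        * allocation and the individual fence references; only on failure
+        * must we drop them ourselves on the cleanup path below.
+        */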
+       array = dma_fence_array_create(num_fences, fences,
+                                      dma_fence_context_alloc(1),
+                                      1, false);
+       if (!array)
+               goto cleanup;
+       return &array->base;
+
+cleanup:
+       for (i = 0; i < num_fences; ++i)
+               dma_fence_put(fences[i]);
+       kfree(fences);
+       return NULL;
+}
+
+static struct dma_fence *mock_chain(struct dma_fence *prev,
+                                   struct dma_fence *fence)
+{
+       struct dma_fence_chain *f;
+
+       f = dma_fence_chain_alloc();
+       if (!f) {
+               dma_fence_put(prev);
+               dma_fence_put(fence);
+               return NULL;
+       }
+
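+       /* dma_fence_chain_init() takes over the references to prev and fence. */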
+       dma_fence_chain_init(f, prev, fence, 1);
+       return &f->base;
+}
+
+static int sanitycheck(void *arg)
+{
+       struct dma_fence *f, *chain, *array;
+       int err = 0;
+
+       f = mock_fence();
+       if (!f)
+               return -ENOMEM;
+
+       array = mock_array(1, f);
+       if (!array)
+               return -ENOMEM;
+
+       chain = mock_chain(NULL, array);
+       if (!chain)
+               return -ENOMEM;
+
+       dma_fence_signal(f);
+       dma_fence_put(chain);
+       return err;
+}
+
+static int unwrap_array(void *arg)
+{
+       struct dma_fence *fence, *f1, *f2, *array;
+       struct dma_fence_unwrap iter;
+       int err = 0;
+
+       f1 = mock_fence();
+       if (!f1)
+               return -ENOMEM;
+
+       f2 = mock_fence();
+       if (!f2) {
+               dma_fence_put(f1);
+               return -ENOMEM;
+       }
+
+       array = mock_array(2, f1, f2);
+       if (!array)
+               return -ENOMEM;
+
+       dma_fence_unwrap_for_each(fence, &iter, array) {
+               if (fence == f1) {
+                       f1 = NULL;
+               } else if (fence == f2) {
+                       f2 = NULL;
+               } else {
+                       pr_err("Unexpected fence!\n");
+                       err = -EINVAL;
+               }
+       }
+
+       if (f1 || f2) {
+               pr_err("Not all fences seen!\n");
+               err = -EINVAL;
+       }
+
+       dma_fence_signal(f1);
+       dma_fence_signal(f2);
+       dma_fence_put(array);
+       return err;
+}
+
+static int unwrap_chain(void *arg)
+{
+       struct dma_fence *fence, *f1, *f2, *chain;
+       struct dma_fence_unwrap iter;
+       int err = 0;
+
+       f1 = mock_fence();
+       if (!f1)
+               return -ENOMEM;
+
+       f2 = mock_fence();
+       if (!f2) {
+               dma_fence_put(f1);
+               return -ENOMEM;
+       }
+
+       chain = mock_chain(f1, f2);
+       if (!chain)
+               return -ENOMEM;
+
+       dma_fence_unwrap_for_each(fence, &iter, chain) {
+               if (fence == f1) {
+                       f1 = NULL;
+               } else if (fence == f2) {
+                       f2 = NULL;
+               } else {
+                       pr_err("Unexpected fence!\n");
+                       err = -EINVAL;
+               }
+       }
+
+       if (f1 || f2) {
+               pr_err("Not all fences seen!\n");
+               err = -EINVAL;
+       }
+
+       dma_fence_signal(f1);
+       dma_fence_signal(f2);
+       dma_fence_put(chain);
+       return err;
+}
+
+static int unwrap_chain_array(void *arg)
+{
+       struct dma_fence *fence, *f1, *f2, *array, *chain;
+       struct dma_fence_unwrap iter;
+       int err = 0;
+
+       f1 = mock_fence();
+       if (!f1)
+               return -ENOMEM;
+
+       f2 = mock_fence();
+       if (!f2) {
+               dma_fence_put(f1);
+               return -ENOMEM;
+       }
+
+       array = mock_array(2, f1, f2);
+       if (!array)
+               return -ENOMEM;
+
+       chain = mock_chain(NULL, array);
+       if (!chain)
+               return -ENOMEM;
+
+       dma_fence_unwrap_for_each(fence, &iter, chain) {
+               if (fence == f1) {
+                       f1 = NULL;
+               } else if (fence == f2) {
+                       f2 = NULL;
+               } else {
+                       pr_err("Unexpected fence!\n");
+                       err = -EINVAL;
+               }
+       }
+
+       if (f1 || f2) {
+               pr_err("Not all fences seen!\n");
+               err = -EINVAL;
+       }
+
+       dma_fence_signal(f1);
+       dma_fence_signal(f2);
+       dma_fence_put(chain);
+       return err;
+}
+
+int dma_fence_unwrap(void)
+{
+       static const struct subtest tests[] = {
+               SUBTEST(sanitycheck),
+               SUBTEST(unwrap_array),
+               SUBTEST(unwrap_chain),
+               SUBTEST(unwrap_chain_array),
+       };
+
+       return subtests(tests, NULL);
+}
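
[ Illustration only, not part of the patch: dma_fence_unwrap_for_each()
  visits every leaf fence behind a chain/array container, as the tests
  above exercise, so counting the leaves is a plain loop. "head" is a
  hypothetical fence pointer:

	struct dma_fence_unwrap iter;
	struct dma_fence *f;
	unsigned int count = 0;

	dma_fence_unwrap_for_each(f, &iter, head)
		count++;
]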
index 394e6e1e968604801d0468442fd4ba6e871dc447..514d213261df3d8f579dedec1269c353b84a7f0a 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright (C) 2012 Google, Inc.
  */
 
+#include <linux/dma-fence-unwrap.h>
 #include <linux/export.h>
 #include <linux/file.h>
 #include <linux/fs.h>
@@ -172,20 +173,6 @@ static int sync_file_set_fence(struct sync_file *sync_file,
        return 0;
 }
 
-static struct dma_fence **get_fences(struct sync_file *sync_file,
-                                    int *num_fences)
-{
-       if (dma_fence_is_array(sync_file->fence)) {
-               struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
-
-               *num_fences = array->num_fences;
-               return array->fences;
-       }
-
-       *num_fences = 1;
-       return &sync_file->fence;
-}
-
 static void add_fence(struct dma_fence **fences,
                      int *i, struct dma_fence *fence)
 {
@@ -210,86 +197,97 @@ static void add_fence(struct dma_fence **fences,
 static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
                                         struct sync_file *b)
 {
+       struct dma_fence *a_fence, *b_fence, **fences;
+       struct dma_fence_unwrap a_iter, b_iter;
+       unsigned int index, num_fences;
        struct sync_file *sync_file;
-       struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences;
-       int i = 0, i_a, i_b, num_fences, a_num_fences, b_num_fences;
 
        sync_file = sync_file_alloc();
        if (!sync_file)
                return NULL;
 
-       a_fences = get_fences(a, &a_num_fences);
-       b_fences = get_fences(b, &b_num_fences);
-       if (a_num_fences > INT_MAX - b_num_fences)
-               goto err;
+       num_fences = 0;
+       dma_fence_unwrap_for_each(a_fence, &a_iter, a->fence)
+               ++num_fences;
+       dma_fence_unwrap_for_each(b_fence, &b_iter, b->fence)
+               ++num_fences;
 
-       num_fences = a_num_fences + b_num_fences;
+       if (num_fences > INT_MAX)
+               goto err_free_sync_file;
 
        fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
        if (!fences)
-               goto err;
+               goto err_free_sync_file;
 
        /*
-        * Assume sync_file a and b are both ordered and have no
-        * duplicates with the same context.
+        * We can't guarantee that fences in both a and b are ordered, but it is
+        * still quite likely.
         *
-        * If a sync_file can only be created with sync_file_merge
-        * and sync_file_create, this is a reasonable assumption.
+        * So attempt to order the fences as we pass over them and merge fences
+        * with the same context.
         */
-       for (i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
-               struct dma_fence *pt_a = a_fences[i_a];
-               struct dma_fence *pt_b = b_fences[i_b];
 
-               if (pt_a->context < pt_b->context) {
-                       add_fence(fences, &i, pt_a);
+       index = 0;
+       for (a_fence = dma_fence_unwrap_first(a->fence, &a_iter),
+            b_fence = dma_fence_unwrap_first(b->fence, &b_iter);
+            a_fence || b_fence; ) {
+
+               if (!b_fence) {
+                       add_fence(fences, &index, a_fence);
+                       a_fence = dma_fence_unwrap_next(&a_iter);
+
+               } else if (!a_fence) {
+                       add_fence(fences, &index, b_fence);
+                       b_fence = dma_fence_unwrap_next(&b_iter);
+
+               } else if (a_fence->context < b_fence->context) {
+                       add_fence(fences, &index, a_fence);
+                       a_fence = dma_fence_unwrap_next(&a_iter);
 
-                       i_a++;
-               } else if (pt_a->context > pt_b->context) {
-                       add_fence(fences, &i, pt_b);
+               } else if (b_fence->context < a_fence->context) {
+                       add_fence(fences, &index, b_fence);
+                       b_fence = dma_fence_unwrap_next(&b_iter);
+
+               } else if (__dma_fence_is_later(a_fence->seqno, b_fence->seqno,
+                                               a_fence->ops)) {
+                       add_fence(fences, &index, a_fence);
+                       a_fence = dma_fence_unwrap_next(&a_iter);
+                       b_fence = dma_fence_unwrap_next(&b_iter);
 
-                       i_b++;
                } else {
-                       if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno,
-                                                pt_a->ops))
-                               add_fence(fences, &i, pt_a);
-                       else
-                               add_fence(fences, &i, pt_b);
-
-                       i_a++;
-                       i_b++;
+                       add_fence(fences, &index, b_fence);
+                       a_fence = dma_fence_unwrap_next(&a_iter);
+                       b_fence = dma_fence_unwrap_next(&b_iter);
                }
        }
 
-       for (; i_a < a_num_fences; i_a++)
-               add_fence(fences, &i, a_fences[i_a]);
-
-       for (; i_b < b_num_fences; i_b++)
-               add_fence(fences, &i, b_fences[i_b]);
-
-       if (i == 0)
-               fences[i++] = dma_fence_get(a_fences[0]);
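+       /*
+        * Everything was filtered out as already signaled; fall back to the
+        * always-signaled stub fence so the new sync_file stays valid.
+        */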
+       if (index == 0)
+               fences[index++] = dma_fence_get_stub();
 
-       if (num_fences > i) {
-               nfences = krealloc_array(fences, i, sizeof(*fences), GFP_KERNEL);
-               if (!nfences)
-                       goto err;
+       if (num_fences > index) {
+               struct dma_fence **tmp;
 
-               fences = nfences;
+               /* Keep going even if shrinking the array fails */
+               tmp = krealloc_array(fences, index, sizeof(*fences),
+                                    GFP_KERNEL);
+               if (tmp)
+                       fences = tmp;
        }
 
-       if (sync_file_set_fence(sync_file, fences, i) < 0)
-               goto err;
+       if (sync_file_set_fence(sync_file, fences, index) < 0)
+               goto err_put_fences;
 
        strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
        return sync_file;
 
-err:
-       while (i)
-               dma_fence_put(fences[--i]);
+err_put_fences:
+       while (index)
+               dma_fence_put(fences[--index]);
        kfree(fences);
+
+err_free_sync_file:
        fput(sync_file->file);
        return NULL;
-
 }
 
 static int sync_file_release(struct inode *inode, struct file *file)
@@ -398,11 +396,13 @@ static int sync_fill_fence_info(struct dma_fence *fence,
 static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
                                       unsigned long arg)
 {
-       struct sync_file_info info;
        struct sync_fence_info *fence_info = NULL;
-       struct dma_fence **fences;
+       struct dma_fence_unwrap iter;
+       struct sync_file_info info;
+       unsigned int num_fences;
+       struct dma_fence *fence;
+       int ret;
        __u32 size;
-       int num_fences, ret, i;
 
        if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
                return -EFAULT;
@@ -410,7 +410,9 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
        if (info.flags || info.pad)
                return -EINVAL;
 
-       fences = get_fences(sync_file, &num_fences);
+       num_fences = 0;
+       dma_fence_unwrap_for_each(fence, &iter, sync_file->fence)
+               ++num_fences;
 
        /*
         * Passing num_fences = 0 means that userspace doesn't want to
@@ -433,8 +435,11 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
        if (!fence_info)
                return -ENOMEM;
 
-       for (i = 0; i < num_fences; i++) {
-               int status = sync_fill_fence_info(fences[i], &fence_info[i]);
+       num_fences = 0;
+       dma_fence_unwrap_for_each(fence, &iter, sync_file->fence) {
+               int status;
+
+               status = sync_fill_fence_info(fence, &fence_info[num_fences++]);
                info.status = info.status <= 0 ? info.status : status;
        }
 
index cf6fed6dec773b74c810309fc7c564f50abed93c..45600acc0f455e55be81b75ec0cbebe3fc9a119e 100644 (file)
@@ -49,7 +49,7 @@ struct scmi_msg_resp_clock_describe_rates {
        struct {
                __le32 value_low;
                __le32 value_high;
-       } rate[0];
+       } rate[];
 #define RATE_TO_U64(X)         \
 ({                             \
        typeof(X) x = (X);      \
@@ -210,7 +210,8 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
 
        if (rate_discrete && rate) {
                clk->list.num_rates = tot_rate_cnt;
-               sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL);
+               sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
+                    rate_cmp_func, NULL);
        }
 
        clk->rate_discrete = rate_discrete;
index 46118300a4d1f8be0f5dcedc8d769f571b44dafe..e17c6568344d5bed9717d978ed245a11dce509ba 100644 (file)
@@ -679,7 +679,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
 
        xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
        if (IS_ERR(xfer)) {
-               scmi_clear_channel(info, cinfo);
+               if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
+                       scmi_clear_channel(info, cinfo);
                return;
        }
 
index 734f1eeee1617300b9f866ddf345d5f72478028b..8302a2b4aeeb175c10f0ec92ec78d71fb7c345e4 100644 (file)
@@ -405,8 +405,8 @@ static int scmi_optee_chan_free(int id, void *p, void *data)
        return 0;
 }
 
-static struct scmi_shared_mem *get_channel_shm(struct scmi_optee_channel *chan,
-                                              struct scmi_xfer *xfer)
+static struct scmi_shared_mem __iomem *
+get_channel_shm(struct scmi_optee_channel *chan, struct scmi_xfer *xfer)
 {
        if (!chan)
                return NULL;
@@ -419,7 +419,7 @@ static int scmi_optee_send_message(struct scmi_chan_info *cinfo,
                                   struct scmi_xfer *xfer)
 {
        struct scmi_optee_channel *channel = cinfo->transport_info;
-       struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer);
+       struct scmi_shared_mem __iomem *shmem = get_channel_shm(channel, xfer);
        int ret;
 
        mutex_lock(&channel->mu);
@@ -436,7 +436,7 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
                                      struct scmi_xfer *xfer)
 {
        struct scmi_optee_channel *channel = cinfo->transport_info;
-       struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer);
+       struct scmi_shared_mem __iomem *shmem = get_channel_shm(channel, xfer);
 
        shmem_fetch_response(shmem, xfer);
 }
index 8e5d87984a48957d9a7c4856511a490e0978f45a..41c31b10ae8488c5a897bdced43f99afa60bf985 100644 (file)
@@ -134,7 +134,7 @@ static int gpio_sim_get_multiple(struct gpio_chip *gc,
        struct gpio_sim_chip *chip = gpiochip_get_data(gc);
 
        mutex_lock(&chip->lock);
-       bitmap_copy(bits, chip->value_map, gc->ngpio);
+       bitmap_replace(bits, bits, chip->value_map, mask, gc->ngpio);
        mutex_unlock(&chip->lock);
 
        return 0;
@@ -146,7 +146,7 @@ static void gpio_sim_set_multiple(struct gpio_chip *gc,
        struct gpio_sim_chip *chip = gpiochip_get_data(gc);
 
        mutex_lock(&chip->lock);
-       bitmap_copy(chip->value_map, bits, gc->ngpio);
+       bitmap_replace(chip->value_map, chip->value_map, bits, mask, gc->ngpio);
        mutex_unlock(&chip->lock);
 }
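
[ Illustration only: bitmap_replace(dst, old, new, mask, nbits) computes

	dst = (old & ~mask) | (new & mask)

  per bit, so only the lines selected by "mask" are read or written,
  where bitmap_copy() clobbered all of them. ]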
 
index 69854fd2382aae45135b5674dc404bcadaa6a8e7..416725c26e949e65a90dbb0bf6065c1a82107880 100644 (file)
@@ -47,8 +47,9 @@ static int ts4900_gpio_direction_input(struct gpio_chip *chip,
 {
        struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
 
-       /* Only clear the OE bit here, requires a RMW. Prevents potential issue
-        * with OE and data getting to the physical pin at different times.
+       /*
+        * Only clear the OE bit here, requires a RMW. Prevents a potential issue
+        * with OE and DAT getting to the physical pin at different times.
         */
        return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OE, 0);
 }
@@ -60,9 +61,10 @@ static int ts4900_gpio_direction_output(struct gpio_chip *chip,
        unsigned int reg;
        int ret;
 
-       /* If changing from an input to an output, we need to first set the
-        * proper data bit to what is requested and then set OE bit. This
-        * prevents a glitch that can occur on the IO line
+       /*
+        * If changing from an input to an output, we need to first set the
+        * GPIO's DAT bit to what is requested and then set the OE bit. This
+        * prevents a glitch that can occur on the IO line.
         */
        regmap_read(priv->regmap, offset, &reg);
        if (!(reg & TS4900_GPIO_OE)) {
index b159e92a3612eabb5ebd2196e3bc881eb2fd2a9b..8e03614c7a24b890d2170d0198e43d17886747dd 100644 (file)
  * Actually, the following platforms have DIO support:
  *
  * TS-5500:
- *   Documentation: http://wiki.embeddedarm.com/wiki/TS-5500
+ *   Documentation: https://docs.embeddedts.com/TS-5500
  *   Blocks: DIO1, DIO2 and LCD port.
  *
  * TS-5600:
- *   Documentation: http://wiki.embeddedarm.com/wiki/TS-5600
+ *   Documentation: https://docs.embeddedts.com/TS-5600
  *   Blocks: LCD port (identical to TS-5500 LCD).
  */
 
index a5495ad31c9ce6ad8b523fadeba6b83a3e9403a1..c2523ac26facdd4a0dc69bb0a6fa6fbd2186c893 100644 (file)
@@ -108,7 +108,7 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
  * controller does not have GPIO chip registered at the moment. This is to
  * support probe deferral.
  */
-static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
+static struct gpio_desc *acpi_get_gpiod(char *path, unsigned int pin)
 {
        struct gpio_chip *chip;
        acpi_handle handle;
@@ -136,7 +136,7 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
  * as it is intended for use outside of the GPIO layer (in a similar fashion to
  * gpiod_get_index() for example) it also holds a reference to the GPIO device.
  */
-struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label)
+struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label)
 {
        struct gpio_desc *gpio;
        int ret;
@@ -317,11 +317,12 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
        return desc;
 }
 
-static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
+static bool acpi_gpio_in_ignore_list(const char *controller_in, unsigned int pin_in)
 {
        const char *controller, *pin_str;
-       int len, pin;
+       unsigned int pin;
        char *endp;
+       int len;
 
        controller = ignore_wake;
        while (controller) {
@@ -354,13 +355,13 @@ err:
 static bool acpi_gpio_irq_is_wake(struct device *parent,
                                  struct acpi_resource_gpio *agpio)
 {
-       int pin = agpio->pin_table[0];
+       unsigned int pin = agpio->pin_table[0];
 
        if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
                return false;
 
        if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) {
-               dev_info(parent, "Ignoring wakeup on pin %d\n", pin);
+               dev_info(parent, "Ignoring wakeup on pin %u\n", pin);
                return false;
        }
 
@@ -378,7 +379,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
        struct acpi_gpio_event *event;
        irq_handler_t handler = NULL;
        struct gpio_desc *desc;
-       int ret, pin, irq;
+       unsigned int pin;
+       int ret, irq;
 
        if (!acpi_gpio_get_irq_resource(ares, &agpio))
                return AE_OK;
@@ -387,8 +389,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
        pin = agpio->pin_table[0];
 
        if (pin <= 255) {
-               char ev_name[5];
-               sprintf(ev_name, "_%c%02hhX",
+               char ev_name[8];
+               sprintf(ev_name, "_%c%02X",
                        agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
                        pin);
                if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
@@ -1098,7 +1100,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
 
        length = min_t(u16, agpio->pin_table_length, pin_index + bits);
        for (i = pin_index; i < length; ++i) {
-               int pin = agpio->pin_table[i];
+               unsigned int pin = agpio->pin_table[i];
                struct acpi_gpio_connection *conn;
                struct gpio_desc *desc;
                bool found;
index e59884cc12a718f16df09981b9fda2fffd81e9c4..085348e0898608dc8a9059219113f570bc6ded04 100644 (file)
@@ -1404,6 +1404,16 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
 {
        struct irq_domain *domain = gc->irq.domain;
 
+#ifdef CONFIG_GPIOLIB_IRQCHIP
+       /*
+        * Avoid race condition with other code, which tries to lookup
+        * an IRQ before the irqchip has been properly registered,
+        * i.e. while gpiochip is still being brought up.
+        */
+       if (!gc->irq.initialized)
+               return -EPROBE_DEFER;
+#endif
+
        if (!gpiochip_irqchip_irq_valid(gc, offset))
                return -ENXIO;
 
@@ -1593,6 +1603,15 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
 
        acpi_gpiochip_request_interrupts(gc);
 
+       /*
+        * Using barrier() here to prevent compiler from reordering
+        * gc->irq.initialized before initialization of above
+        * GPIO chip irq members.
+        */
+       barrier();
+
+       gc->irq.initialized = true;
+
        return 0;
 }
 
index 5b393622f59205700acf687179841e7e0b0b8f5b..a0f0a17e224fe554aeff766a5dc3dcc5b4b2144d 100644 (file)
 #define CONNECTOR_OBJECT_ID_eDP                   0x14
 #define CONNECTOR_OBJECT_ID_MXM                   0x15
 #define CONNECTOR_OBJECT_ID_LVDS_eDP              0x16
+#define CONNECTOR_OBJECT_ID_USBC                  0x17
 
 /* deleted */
 
index 3987ecb24ef4fd50749c7944b4cec7d8f8346517..49f734137f158a6aecc6f362f9d62e1ef601fde1 100644 (file)
@@ -5733,7 +5733,7 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
                struct amdgpu_ring *ring)
 {
 #ifdef CONFIG_X86_64
-       if (adev->flags & AMD_IS_APU)
+       if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
                return;
 #endif
        if (adev->gmc.xgmi.connected_to_cpu)
@@ -5749,7 +5749,7 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
                struct amdgpu_ring *ring)
 {
 #ifdef CONFIG_X86_64
-       if (adev->flags & AMD_IS_APU)
+       if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
                return;
 #endif
        if (adev->gmc.xgmi.connected_to_cpu)
index bb1c025d90019a443d8f53cf596520b08988cf17..29e9419a914bb1d0cf759d6462613a2acc3800af 100644 (file)
@@ -680,7 +680,7 @@ MODULE_PARM_DESC(sched_policy,
  * Maximum number of processes that HWS can schedule concurrently. The maximum is the
  * number of VMIDs assigned to the HWS, which is also the default.
  */
-int hws_max_conc_proc = 8;
+int hws_max_conc_proc = -1;
 module_param(hws_max_conc_proc, int, 0444);
 MODULE_PARM_DESC(hws_max_conc_proc,
        "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");
@@ -2323,18 +2323,23 @@ static int amdgpu_pmops_suspend(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(drm_dev);
-       int r;
 
        if (amdgpu_acpi_is_s0ix_active(adev))
                adev->in_s0ix = true;
        else
                adev->in_s3 = true;
-       r = amdgpu_device_suspend(drm_dev, true);
-       if (r)
-               return r;
+       return amdgpu_device_suspend(drm_dev, true);
+}
+
+static int amdgpu_pmops_suspend_noirq(struct device *dev)
+{
+       struct drm_device *drm_dev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
        if (!adev->in_s0ix)
-               r = amdgpu_asic_reset(adev);
-       return r;
+               return amdgpu_asic_reset(adev);
+
+       return 0;
 }
 
 static int amdgpu_pmops_resume(struct device *dev)
@@ -2575,6 +2580,7 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
        .prepare = amdgpu_pmops_prepare,
        .complete = amdgpu_pmops_complete,
        .suspend = amdgpu_pmops_suspend,
+       .suspend_noirq = amdgpu_pmops_suspend_noirq,
        .resume = amdgpu_pmops_resume,
        .freeze = amdgpu_pmops_freeze,
        .thaw = amdgpu_pmops_thaw,
index 8fe9399762242185c0d1e6a597beff027507ed6e..28a736c507bb3f84e956203d2c115b8afdc160de 100644 (file)
@@ -266,7 +266,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
                    * adev->gfx.mec.num_pipe_per_mec
                    * adev->gfx.mec.num_queue_per_pipe;
 
-       while (queue_bit-- >= 0) {
+       while (--queue_bit >= 0) {
                if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
                        continue;
 
index ca2cfb65f9763b640d8d90a9bd71d0cebc0bd620..a66a0881a934bb0ff3a8d2e551a262be02df739e 100644 (file)
@@ -561,9 +561,15 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
 
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(9, 0, 1):
+       case IP_VERSION(9, 3, 0):
        case IP_VERSION(9, 4, 0):
        case IP_VERSION(9, 4, 1):
        case IP_VERSION(9, 4, 2):
+       case IP_VERSION(10, 3, 3):
+       case IP_VERSION(10, 3, 4):
+       case IP_VERSION(10, 3, 5):
+       case IP_VERSION(10, 3, 6):
+       case IP_VERSION(10, 3, 7):
                /*
                 * noretry = 0 will cause kfd page fault tests to fail
                 * for some ASICs, so set default to 1 for these ASICs.
index 25731719c627d6cd7f8ae8b85fb093e521596a5a..940752488330f64f8a3cdc267d399843d3158d5a 100644 (file)
@@ -1284,6 +1284,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
  */
 void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 {
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct dma_fence *fence = NULL;
        struct amdgpu_bo *abo;
        int r;
@@ -1303,7 +1304,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
                amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
 
        if (bo->resource->mem_type != TTM_PL_VRAM ||
-           !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
+           !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
+           adev->in_suspend || adev->shutdown)
                return;
 
        if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
index 5320bb0883d85f352867289911f67eaf64740926..317d80209e9581bb7d3f767306959b6a199ba763 100644 (file)
@@ -300,8 +300,8 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-                    unsigned int ring_size, struct amdgpu_irq_src *irq_src,
-                    unsigned int irq_type, unsigned int prio,
+                    unsigned int max_dw, struct amdgpu_irq_src *irq_src,
+                    unsigned int irq_type, unsigned int hw_prio,
                     atomic_t *sched_score);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
 void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
index f99093f2ebc71ce12ee596c97688f33521711a71..a0ee828a4a97804cb7e4863fca2ef9ae339560b2 100644 (file)
@@ -52,7 +52,7 @@
 #define FIRMWARE_ALDEBARAN     "amdgpu/aldebaran_vcn.bin"
 #define FIRMWARE_BEIGE_GOBY    "amdgpu/beige_goby_vcn.bin"
 #define FIRMWARE_YELLOW_CARP   "amdgpu/yellow_carp_vcn.bin"
-#define FIRMWARE_VCN_3_1_2     "amdgpu/vcn_3_1_2_vcn.bin"
+#define FIRMWARE_VCN_3_1_2     "amdgpu/vcn_3_1_2.bin"
 
 MODULE_FIRMWARE(FIRMWARE_RAVEN);
 MODULE_FIRMWARE(FIRMWARE_PICASSO);
index e2fde88aaf5e3d1f5aed612ce3c813c4fecc9acb..f06fb7f882e2ec125dd4ef8316e5cf9e4cf382dd 100644 (file)
 #define AMDGPU_VCN_MULTI_QUEUE_FLAG    (1 << 8)
 #define AMDGPU_VCN_SW_RING_FLAG                (1 << 9)
 #define AMDGPU_VCN_FW_LOGGING_FLAG     (1 << 10)
+#define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1 << 11)
 
 #define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER       0x00000001
 #define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER         0x00000001
@@ -279,6 +280,11 @@ struct amdgpu_fw_shared_fw_logging {
        uint32_t size;
 };
 
+struct amdgpu_fw_shared_smu_interface_info {
+       uint8_t smu_interface_type;
+       uint8_t padding[3];
+};
+
 struct amdgpu_fw_shared {
        uint32_t present_flag_0;
        uint8_t pad[44];
@@ -287,6 +293,7 @@ struct amdgpu_fw_shared {
        struct amdgpu_fw_shared_multi_queue multi_queue;
        struct amdgpu_fw_shared_sw_ring sw_ring;
        struct amdgpu_fw_shared_fw_logging fw_log;
+       struct amdgpu_fw_shared_smu_interface_info smu_interface_info;
 };
 
 struct amdgpu_vcn_fwlog {
index f4c6accd32263c537dabbd10fbdf6d8ea874778e..9426e252d8aa6c67618b8244d3791e3e37ce0e28 100644 (file)
@@ -3293,7 +3293,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_3[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -3429,7 +3429,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_6[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000042),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x00000044),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -3454,7 +3454,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_7[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000041),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -7689,6 +7689,7 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(10, 3, 1):
        case IP_VERSION(10, 3, 3):
+       case IP_VERSION(10, 3, 7):
                preempt_disable();
                clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh);
                clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh);
index 46d4bf27ebbbb5ab84b13ce4ba3c35d7ff0a113e..b8cfcc6b1125ccfe99b262fb03ae26dd295f19fa 100644 (file)
@@ -1205,6 +1205,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
        { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
        /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
        { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
+       /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
+       { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
        { 0, 0, 0, 0, 0 },
 };
 
index 3c1d440824a73c30db584d73857509a9134cbdda..5228421b0f724745f111737d6a79f418cb2f595c 100644 (file)
@@ -814,7 +814,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
 #ifdef CONFIG_X86_64
-       if (adev->flags & AMD_IS_APU) {
+       if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
                adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
                adev->gmc.aper_size = adev->gmc.real_vram_size;
        }
index 344d819b4c1b6e9b03d772adefee87ac2ac956ef..979da6f510e886ffba16b323d5c19c72035869c2 100644 (file)
@@ -381,8 +381,9 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
 #ifdef CONFIG_X86_64
-       if (adev->flags & AMD_IS_APU &&
-           adev->gmc.real_vram_size > adev->gmc.aper_size) {
+       if ((adev->flags & AMD_IS_APU) &&
+           adev->gmc.real_vram_size > adev->gmc.aper_size &&
+           !amdgpu_passthrough(adev)) {
                adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
                adev->gmc.aper_size = adev->gmc.real_vram_size;
        }
index ca9841d5669fb9829cf6471b51b918d43835a195..1932a3e4af7e2e75dd530e416459e85993470b04 100644 (file)
@@ -581,7 +581,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
 #ifdef CONFIG_X86_64
-       if (adev->flags & AMD_IS_APU) {
+       if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
                adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
                adev->gmc.aper_size = adev->gmc.real_vram_size;
        }
index 431742eb78110acd28507e85673cf4e94eda4a9f..6009fbfdcc198bcdf82aa5a3bc127163741bd27d 100644 (file)
@@ -1456,7 +1456,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
         */
 
        /* check whether both host-gpu and gpu-gpu xgmi links exist */
-       if ((adev->flags & AMD_IS_APU) ||
+       if (((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
            (adev->gmc.xgmi.supported &&
             adev->gmc.xgmi.connected_to_cpu)) {
                adev->gmc.aper_base =
@@ -1721,7 +1721,7 @@ static int gmc_v9_0_sw_fini(void *handle)
        amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
        amdgpu_gart_table_vram_free(adev);
-       amdgpu_bo_unref(&adev->gmc.pdb0_bo);
+       amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
        amdgpu_bo_fini(adev);
 
        return 0;
index dff54190b96c7971463dc0bc479ce35db86f0dbc..f0fbcda76f5e398e40464b1a3b949160056d6d5e 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/firmware.h>
 
 #include "amdgpu.h"
+#include "amdgpu_cs.h"
 #include "amdgpu_vcn.h"
 #include "amdgpu_pm.h"
 #include "soc15.h"
@@ -1900,6 +1901,75 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
        .set_powergating_state = vcn_v1_0_set_powergating_state,
 };
 
+/*
+ * Due to a hardware issue, VCN cannot handle a GTT TMZ buffer on
+ * CHIP_RAVEN series ASICs. As a workaround, move such a buffer to
+ * the VRAM domain before command submission.
+ */
+static int vcn_v1_0_validate_bo(struct amdgpu_cs_parser *parser,
+                               struct amdgpu_job *job,
+                               uint64_t addr)
+{
+       struct ttm_operation_ctx ctx = { false, false };
+       struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+       struct amdgpu_vm *vm = &fpriv->vm;
+       struct amdgpu_bo_va_mapping *mapping;
+       struct amdgpu_bo *bo;
+       int r;
+
+       addr &= AMDGPU_GMC_HOLE_MASK;
+       if (addr & 0x7) {
+               DRM_ERROR("VCN messages must be 8 byte aligned!\n");
+               return -EINVAL;
+       }
+
+       mapping = amdgpu_vm_bo_lookup_mapping(vm, addr/AMDGPU_GPU_PAGE_SIZE);
+       if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
+               return -EINVAL;
+
+       bo = mapping->bo_va->base.bo;
+       if (!(bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED))
+               return 0;
+
+       amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+       if (r) {
+               DRM_ERROR("Failed to validate the VCN message BO (%d)!\n", r);
+               return r;
+       }
+
+       return r;
+}
+
+static int vcn_v1_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+                                          struct amdgpu_job *job,
+                                          struct amdgpu_ib *ib)
+{
+       uint32_t msg_lo = 0, msg_hi = 0;
+       int i, r;
+
+       if (!(ib->flags & AMDGPU_IB_FLAGS_SECURE))
+               return 0;
+
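+       /*
+        * The IB is a stream of PACKET0 register/value pairs: latch the
+        * 64-bit message address from the DATA0/DATA1 writes and validate
+        * the BO once the CMD write arrives.
+        */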
+       for (i = 0; i < ib->length_dw; i += 2) {
+               uint32_t reg = amdgpu_ib_get_value(ib, i);
+               uint32_t val = amdgpu_ib_get_value(ib, i + 1);
+
+               if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
+                       msg_lo = val;
+               } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
+                       msg_hi = val;
+               } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0)) {
+                       r = vcn_v1_0_validate_bo(p, job,
+                                                ((u64)msg_hi) << 32 | msg_lo);
+                       if (r)
+                               return r;
+               }
+       }
+
+       return 0;
+}
+
 static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCN_DEC,
        .align_mask = 0xf,
@@ -1910,6 +1980,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
        .get_rptr = vcn_v1_0_dec_ring_get_rptr,
        .get_wptr = vcn_v1_0_dec_ring_get_wptr,
        .set_wptr = vcn_v1_0_dec_ring_set_wptr,
+       .patch_cs_in_place = vcn_v1_0_ring_patch_cs_in_place,
        .emit_frame_size =
                6 + 6 + /* hdp invalidate / flush */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
index c87263ed20ecb8a17d3b8bfaa9ca1d352035ae43..cb5f0a12333f308d2ec358014ec5cf206f305d11 100644 (file)
@@ -219,6 +219,11 @@ static int vcn_v3_0_sw_init(void *handle)
                                             cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
                                             cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
                fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
+               fw_shared->present_flag_0 |= AMDGPU_VCN_SMU_VERSION_INFO_FLAG;
+               if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 2))
+                       fw_shared->smu_interface_info.smu_interface_type = 2;
+               else if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 1))
+                       fw_shared->smu_interface_info.smu_interface_type = 1;
 
                if (amdgpu_vcnfw_log)
                        amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
@@ -575,8 +580,8 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
                        AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
 
        /* VCN global tiling registers */
-       WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
-               UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+       WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+               UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
 }
 
 static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
@@ -1480,8 +1485,11 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
 
 static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
 {
+       struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
        uint32_t tmp;
 
+       vcn_v3_0_pause_dpg_mode(adev, inst_idx, &state);
+
        /* Wait for power status to be 1 */
        SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
                UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
index 339e12c94cfff496be946947397dcfc4a91a3f8e..62aa6c9d5123df2a36d65df29910dd6a13b0d14c 100644 (file)
@@ -483,15 +483,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
        }
 
        /* Verify module parameters regarding mapped process number */
-       if ((hws_max_conc_proc < 0)
-                       || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
-               dev_err(kfd_device,
-                       "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
-                       hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
-                       kfd->vm_info.vmid_num_kfd);
+       if (hws_max_conc_proc >= 0)
+               kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd);
+       else
                kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
-       } else
-               kfd->max_proc_per_quantum = hws_max_conc_proc;
 
        /* calculate max size of mqds needed for queues */
        size = max_num_of_queues_per_device *
@@ -536,7 +531,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
                goto kfd_doorbell_error;
        }
 
-       kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
+       if (amdgpu_use_xgmi_p2p)
+               kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
 
        kfd->noretry = kfd->adev->gmc.noretry;
 
index deecccebe5b64ceb78f152aac6ad3fbfdff82766..64f4a51cc880e77f80268d2c947c9dc8c94cfa70 100644 (file)
@@ -749,6 +749,8 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
        event_waiters = kmalloc_array(num_events,
                                        sizeof(struct kfd_event_waiter),
                                        GFP_KERNEL);
+       if (!event_waiters)
+               return NULL;
 
        for (i = 0; (event_waiters) && (i < num_events) ; i++) {
                init_wait(&event_waiters[i].wait);
index e4beebb1c80a21ed62b5c0abca0a373aed9bce35..f2e1d506ba211f04c35104e692d5bb197dc0df10 100644 (file)
@@ -247,15 +247,6 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
                return ret;
        }
 
-       ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
-                              O_RDWR);
-       if (ret < 0) {
-               kfifo_free(&client->fifo);
-               kfree(client);
-               return ret;
-       }
-       *fd = ret;
-
        init_waitqueue_head(&client->wait_queue);
        spin_lock_init(&client->lock);
        client->events = 0;
@@ -265,5 +256,20 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
        list_add_rcu(&client->list, &dev->smi_clients);
        spin_unlock(&dev->smi_lock);
 
+       ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
+                              O_RDWR);
+       if (ret < 0) {
+               spin_lock(&dev->smi_lock);
+               list_del_rcu(&client->list);
+               spin_unlock(&dev->smi_lock);
+
+               synchronize_rcu();
+
+               kfifo_free(&client->fifo);
+               kfree(client);
+               return ret;
+       }
+       *fd = ret;
+
        return 0;
 }
index b30656959fd862324591d3d3ad73ff465ab642b6..62139ff35476c3a94f1fb31a3393d8ec75b0450e 100644 (file)
@@ -2714,7 +2714,8 @@ static int dm_resume(void *handle)
                 * this is the case when traversing through already created
                 * MST connectors, should be skipped
                 */
-               if (aconnector->mst_port)
+               if (aconnector->dc_link &&
+                   aconnector->dc_link->type == dc_connection_mst_branch)
                        continue;
 
                mutex_lock(&aconnector->hpd_lock);
@@ -3972,7 +3973,7 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap
                                 max - min);
 }
 
-static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
                                         int bl_idx,
                                         u32 user_brightness)
 {
@@ -4003,7 +4004,8 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
                        DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
        }
 
-       return rc ? 0 : 1;
+       if (rc)
+               dm->actual_brightness[bl_idx] = user_brightness;
 }
 
 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
@@ -9947,7 +9949,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        /* restore the backlight level */
        for (i = 0; i < dm->num_of_edps; i++) {
                if (dm->backlight_dev[i] &&
-                   (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
+                   (dm->actual_brightness[i] != dm->brightness[i]))
                        amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
        }
 #endif
index 6a908d736d6ac9065fc340e6e6151fff03d036f9..7e44b04294488a39560da4a8d4da1033f492bef4 100644 (file)
@@ -540,6 +540,12 @@ struct amdgpu_display_manager {
         * cached backlight values.
         */
        u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
+       /**
+        * @actual_brightness:
+        *
+        * last successfully applied backlight values.
+        */
+       u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
 };
 
 enum dsc_clock_force_state {
index dfba6138f53839ec7ca93e1a616515327a65d542..26feefbb8990ae52481fa6af7f044962d7ea60ba 100644 (file)
@@ -374,7 +374,7 @@ void dce_clock_read_ss_info(struct clk_mgr_internal *clk_mgr_dce)
                                clk_mgr_dce->dprefclk_ss_percentage =
                                                info.spread_spectrum_percentage;
                        }
-                       if (clk_mgr_dce->base.ctx->dc->debug.ignore_dpref_ss)
+                       if (clk_mgr_dce->base.ctx->dc->config.ignore_dpref_ss)
                                clk_mgr_dce->dprefclk_ss_percentage = 0;
                }
        }
index edda572dc57017520e4e2e19cc78c67de6331479..8be4c19706285ffbaa933cdd3dae9bdd7f768957 100644 (file)
@@ -436,57 +436,84 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
                struct integrated_info *bios_info,
                const DpmClocks_315_t *clock_table)
 {
-       int i, j;
+       int i;
        struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
-       uint32_t max_dispclk = 0, max_dppclk = 0;
-
-       j = -1;
-
-       ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
-
-       /* Find lowest DPM, FCLK is filled in reverse order*/
-
-       for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
-               if (clock_table->DfPstateTable[i].FClk != 0) {
-                       j = i;
-                       break;
+       uint32_t max_dispclk, max_dppclk, max_pstate = 0, max_socclk, max_fclk = 0, min_pstate = 0;
+       struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
+
+       max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
+       max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
+       max_socclk = find_max_clk_value(clock_table->SocClocks, clock_table->NumSocClkLevelsEnabled);
+
+       /* Find highest fclk pstate */
+       for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) {
+               if (clock_table->DfPstateTable[i].FClk > max_fclk) {
+                       max_fclk = clock_table->DfPstateTable[i].FClk;
+                       max_pstate = i;
                }
        }
 
-       if (j == -1) {
-               /* clock table is all 0s, just use our own hardcode */
-               ASSERT(0);
-               return;
-       }
-
-       bw_params->clk_table.num_entries = j + 1;
-
-       /* dispclk and dppclk can be max at any voltage, same number of levels for both */
-       if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
-           clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
-               max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
-               max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
-       } else {
-               ASSERT(0);
-       }
+       /* For 315 we want to base clock table on dcfclk, need at least one entry regardless of pmfw table */
+       for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
+               int j;
+               uint32_t min_fclk = clock_table->DfPstateTable[0].FClk;
 
-       for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
-               int temp;
+               for (j = 1; j < clock_table->NumDfPstatesEnabled; j++) {
+                       if (clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i]
+                                       && clock_table->DfPstateTable[j].FClk < min_fclk) {
+                               min_fclk = clock_table->DfPstateTable[j].FClk;
+                               min_pstate = j;
+                       }
+               }
 
-               bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
-               bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
-               bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
+               bw_params->clk_table.entries[i].fclk_mhz = min_fclk;
+               bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk;
+               bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage;
+               bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
+               bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
+               bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+               bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
                bw_params->clk_table.entries[i].wck_ratio = 1;
-               temp = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
-               if (temp)
-                       bw_params->clk_table.entries[i].dcfclk_mhz = temp;
-               temp = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
-               if (temp)
-                       bw_params->clk_table.entries[i].socclk_mhz = temp;
+       }
+
+       /* Make sure to include at least one entry and highest pstate */
+       if (max_pstate != min_pstate) {
+               bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
+               bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk;
+               bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_pstate].Voltage;
+               bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(
+                               clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[max_pstate].Voltage);
+               bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(
+                               clock_table, clock_table->SocClocks, clock_table->DfPstateTable[max_pstate].Voltage);
                bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
                bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+               bw_params->clk_table.entries[i].wck_ratio = 1;
+               i++;
        }
+       bw_params->clk_table.num_entries = i;
+
+       /* Include highest socclk */
+       if (bw_params->clk_table.entries[i-1].socclk_mhz < max_socclk)
+               bw_params->clk_table.entries[i-1].socclk_mhz = max_socclk;
 
+       /* Set any 0 clocks to the max default setting. Not an issue for
+        * power since we aren't doing any switching in that case anyway.
+        */
+       for (i = 0; i < bw_params->clk_table.num_entries; i++) {
+               if (!bw_params->clk_table.entries[i].fclk_mhz) {
+                       bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
+                       bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
+                       bw_params->clk_table.entries[i].voltage = def_max.voltage;
+               }
+               if (!bw_params->clk_table.entries[i].dcfclk_mhz)
+                       bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
+               if (!bw_params->clk_table.entries[i].socclk_mhz)
+                       bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
+               if (!bw_params->clk_table.entries[i].dispclk_mhz)
+                       bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
+               if (!bw_params->clk_table.entries[i].dppclk_mhz)
+                       bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
+       }
        bw_params->vram_type = bios_info->memory_type;
        bw_params->num_channels = bios_info->ma_channel_number;
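
The construction above keys the table off DCFCLK: for each enabled DCFCLK level it scans the DF pstate table for the lowest FCLK whose pstate voltage fits under that level's SocVoltage. A minimal standalone sketch of that selection, using hypothetical table values rather than real PMFW data:

    #include <stdint.h>
    #include <stdio.h>

    struct df_pstate { uint32_t fclk; uint32_t voltage; };

    int main(void)
    {
        /* Hypothetical PMFW-style tables; values are illustrative only. */
        struct df_pstate df[4] = {
            { 1600, 800 }, { 1200, 750 }, { 800, 700 }, { 400, 650 }
        };
        uint32_t soc_voltage[3] = { 700, 750, 800 }; /* per DCFCLK level */
        int i, j;

        for (i = 0; i < 3; i++) {
            uint32_t min_fclk = df[0].fclk;
            int min_pstate = 0;

            /* Lowest FCLK whose pstate voltage fits under this level. */
            for (j = 1; j < 4; j++) {
                if (df[j].voltage <= soc_voltage[i] && df[j].fclk < min_fclk) {
                    min_fclk = df[j].fclk;
                    min_pstate = j;
                }
            }
            printf("level %d: fclk %u MHz (pstate %d)\n", i, min_fclk, min_pstate);
        }
        return 0;
    }
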
 
index 880ffea2afc6a1df3822a12c5ec3d8e673ea6354..2600313fea5797321a81688a20f028721cd2fcfb 100644 (file)
@@ -80,8 +80,8 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
 #define VBIOSSMC_MSG_SetDppclkFreq                0x06 ///< Set DPP clock frequency in MHZ
 #define VBIOSSMC_MSG_SetHardMinDcfclkByFreq       0x07 ///< Set DCF clock frequency hard min in MHZ
 #define VBIOSSMC_MSG_SetMinDeepSleepDcfclk        0x08 ///< Set DCF clock minimum frequency in deep sleep in MHZ
-#define VBIOSSMC_MSG_SetPhyclkVoltageByFreq       0x09 ///< Set display phy clock frequency in MHZ in case VMIN does not support phy frequency
-#define VBIOSSMC_MSG_GetFclkFrequency             0x0A ///< Get FCLK frequency, return frequemcy in MHZ
+#define VBIOSSMC_MSG_GetDtbclkFreq                0x09 ///< Get display dtb clock frequency in MHZ
+#define VBIOSSMC_MSG_SetDtbClk                    0x0A ///< Enable/disable dtb clock
 #define VBIOSSMC_MSG_SetDisplayCount              0x0B ///< Inform PMFW of number of display connected
 #define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0x0C ///< To ask PMFW turn off TMDP 48MHz refclk during display off to save power
 #define VBIOSSMC_MSG_UpdatePmeRestore             0x0D ///< To ask PMFW to write into Azalia for PME wake up event
@@ -324,15 +324,26 @@ int dcn315_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr)
        return (dprefclk_get_mhz * 1000);
 }
 
-int dcn315_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr)
+int dcn315_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr)
 {
        int fclk_get_mhz = -1;
 
        if (clk_mgr->smu_present) {
                fclk_get_mhz = dcn315_smu_send_msg_with_param(
                        clk_mgr,
-                       VBIOSSMC_MSG_GetFclkFrequency,
+                       VBIOSSMC_MSG_GetDtbclkFreq,
                        0);
        }
        return (fclk_get_mhz * 1000);
 }
+
+void dcn315_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
+{
+       if (!clk_mgr->smu_present)
+               return;
+
+       dcn315_smu_send_msg_with_param(
+                       clk_mgr,
+                       VBIOSSMC_MSG_SetDtbClk,
+                       enable);
+}
index 66fa42f8dd189768e414d42cd8d2506ce4d63553..5aa3275ac7d88c879c86958fafcd793be746386d 100644 (file)
@@ -37,6 +37,7 @@
 #define NUM_SOC_VOLTAGE_LEVELS  4
 #define NUM_DF_PSTATE_LEVELS    4
 
+
 typedef struct {
   uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz)
   uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz)
@@ -124,5 +125,6 @@ void dcn315_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
 void dcn315_smu_request_voltage_via_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
 void dcn315_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);
 int dcn315_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr);
-int dcn315_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr);
+int dcn315_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr);
+void dcn315_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable);
 #endif /* DAL_DC_315_SMU_H_ */
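
A hedged sketch of how a caller might drive the new DTBCLK interface: enable the clock, then read back its rate in kHz. The type and function bodies below are userspace stand-ins, not the real SMU message path:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins; the real driver uses clk_mgr_internal and the
     * VBIOSSMC_MSG_SetDtbClk/GetDtbclkFreq SMU messages. */
    struct clk_mgr_internal { bool smu_present; int dtbclk_mhz; };

    static void dcn315_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
    {
        if (!clk_mgr->smu_present)
            return;
        clk_mgr->dtbclk_mhz = enable ? 600 : 0; /* pretend the SMU acted */
    }

    static int dcn315_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr)
    {
        int mhz = clk_mgr->smu_present ? clk_mgr->dtbclk_mhz : -1;
        return mhz * 1000; /* the driver returns kHz, like get_dpref_clk */
    }

    int main(void)
    {
        struct clk_mgr_internal cm = { .smu_present = true };

        dcn315_smu_set_dtbclk(&cm, true);
        printf("dtbclk = %d kHz\n", dcn315_smu_get_dtbclk(&cm));
        return 0;
    }
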
index 702d00ce7da4c439971e26eeb5a3e2d6516af1c6..3121dd2d2a91113e3aa449fd7619a35d4c101578 100644 (file)
@@ -686,8 +686,8 @@ void dcn316_clk_mgr_construct(
        clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base);
        clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
        dce_clock_read_ss_info(&clk_mgr->base);
-       clk_mgr->base.dccg->ref_dtbclk_khz =
-       dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);
+       /*clk_mgr->base.dccg->ref_dtbclk_khz =
+       dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);*/
 
        clk_mgr->base.base.bw_params = &dcn316_bw_params;
 
index f6e19efea7568de940803065da576aab11b85fc9..c436db416708e3f8f857e9cc5d8abbcc6f34d77d 100644 (file)
@@ -2389,6 +2389,8 @@ static enum surface_update_type check_update_surfaces_for_stream(
 
                if (stream_update->mst_bw_update)
                        su_flags->bits.mst_bw = 1;
+               if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc))
+                       su_flags->bits.crtc_timing_adjust = 1;
 
                if (su_flags->raw != 0)
                        overall_type = UPDATE_TYPE_FULL;
@@ -2650,6 +2652,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
        if (update->vrr_infopacket)
                stream->vrr_infopacket = *update->vrr_infopacket;
 
+       if (update->crtc_timing_adjust)
+               stream->adjust = *update->crtc_timing_adjust;
+
        if (update->dpms_off)
                stream->dpms_off = *update->dpms_off;
 
@@ -4051,3 +4056,17 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
        if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
                pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
 }
+/*
+ * dc_extended_blank_supported: Decide whether extended blank is supported
+ *
+ * Extended blank is a freesync optimization feature to be enabled in the future.
+ * During the extra vblank period gained from freesync, we have the ability to enter z9/z10.
+ *
+ * @param [in] dc: Current DC state
+ * @return: Indicate whether extended blank is supported (true or false)
+ */
+bool dc_extended_blank_supported(struct dc *dc)
+{
+       return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
+               && dc->caps.zstate_support && dc->caps.is_apu;
+}
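
Callers gate extended-blank programming on this single predicate instead of re-checking the individual flags (dcn20_optimize_bandwidth and decide_zstate_support below do exactly that). A standalone sketch of the check with stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the relevant dc debug/caps bits. */
    struct dc {
        struct { bool extended_blank_optimization, disable_z10; } debug;
        struct { bool zstate_support, is_apu; } caps;
    };

    static bool dc_extended_blank_supported(struct dc *dc)
    {
        return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
            && dc->caps.zstate_support && dc->caps.is_apu;
    }

    int main(void)
    {
        struct dc dc = {
            .debug = { .extended_blank_optimization = true, .disable_z10 = false },
            .caps  = { .zstate_support = true, .is_apu = true },
        };

        if (dc_extended_blank_supported(&dc))
            printf("program extended blank for this config\n");
        return 0;
    }
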
index cb87dd643180876011b4855557a56a410db5a713..bbaa5abdf88859a3d77cec38acaeabd2a1c400c4 100644 (file)
@@ -983,8 +983,7 @@ static bool should_verify_link_capability_destructively(struct dc_link *link,
                                destrictive = false;
                        }
                }
-       } else if (dc_is_hdmi_signal(link->local_sink->sink_signal))
-               destrictive = true;
+       }
 
        return destrictive;
 }
index 351081f574cb7bb6755412c69b9fc1271cdf41eb..22dabe596dfcc1123ff5314b2d2351e43c9b5fe6 100644 (file)
@@ -5216,6 +5216,62 @@ static void retrieve_cable_id(struct dc_link *link)
                                &link->dpcd_caps.cable_id, &usbc_cable_id);
 }
 
+/* DPRX may take some time to respond to AUX messages after HPD is asserted.
+ * If the AUX read is unsuccessful, try to wake the unresponsive DPRX by toggling DPCD SET_POWER (0x600).
+ */
+static enum dc_status wa_try_to_wake_dprx(struct dc_link *link, uint64_t timeout_ms)
+{
+       enum dc_status status = DC_ERROR_UNEXPECTED;
+       uint8_t dpcd_data = 0;
+       uint64_t start_ts = 0;
+       uint64_t current_ts = 0;
+       uint64_t time_taken_ms = 0;
+       enum dc_connection_type type = dc_connection_none;
+
+       status = core_link_read_dpcd(
+                       link,
+                       DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
+                       &dpcd_data,
+                       sizeof(dpcd_data));
+
+       if (status != DC_OK) {
+               DC_LOG_WARNING("%s: Read DPCD LTTPR_CAP failed - try to toggle DPCD SET_POWER for %lld ms.",
+                               __func__,
+                               timeout_ms);
+               start_ts = dm_get_timestamp(link->ctx);
+
+               do {
+                       if (!dc_link_detect_sink(link, &type) || type == dc_connection_none)
+                               break;
+
+                       dpcd_data = DP_SET_POWER_D3;
+                       status = core_link_write_dpcd(
+                                       link,
+                                       DP_SET_POWER,
+                                       &dpcd_data,
+                                       sizeof(dpcd_data));
+
+                       dpcd_data = DP_SET_POWER_D0;
+                       status = core_link_write_dpcd(
+                                       link,
+                                       DP_SET_POWER,
+                                       &dpcd_data,
+                                       sizeof(dpcd_data));
+
+                       current_ts = dm_get_timestamp(link->ctx);
+                       time_taken_ms = div_u64(dm_get_elapse_time_in_ns(link->ctx, current_ts, start_ts), 1000000);
+               } while (status != DC_OK && time_taken_ms < timeout_ms);
+
+               DC_LOG_WARNING("%s: DPCD SET_POWER %s after %lld ms%s",
+                               __func__,
+                               (status == DC_OK) ? "succeeded" : "failed",
+                               time_taken_ms,
+                               (type == dc_connection_none) ? ". Unplugged." : ".");
+       }
+
+       return status;
+}
+
 static bool retrieve_link_cap(struct dc_link *link)
 {
        /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
@@ -5251,6 +5307,15 @@ static bool retrieve_link_cap(struct dc_link *link)
        dc_link_aux_try_to_configure_timeout(link->ddc,
                        LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
 
+       /* Try to ensure AUX channel active before proceeding. */
+       if (link->dc->debug.aux_wake_wa.bits.enable_wa) {
+               uint64_t timeout_ms = link->dc->debug.aux_wake_wa.bits.timeout_ms;
+
+               if (link->dc->debug.aux_wake_wa.bits.use_default_timeout)
+                       timeout_ms = LINK_AUX_WAKE_TIMEOUT_MS;
+               status = wa_try_to_wake_dprx(link, timeout_ms);
+       }
+
        is_lttpr_present = dp_retrieve_lttpr_cap(link);
        /* Read DP tunneling information. */
        status = dpcd_get_tunneling_device_data(link);
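
The wake workaround is a bounded retry loop: toggle SET_POWER D3/D0 until a write succeeds, the sink unplugs, or the timeout expires. The shape of that loop in a standalone sketch, with a fake AUX transaction that succeeds on the third attempt and a fixed 100 ms increment standing in for the timestamp arithmetic:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Fake AUX write that starts succeeding on the third attempt. */
    static bool set_power_toggle_ok(void)
    {
        static int attempts;
        return ++attempts >= 3;
    }

    int main(void)
    {
        const uint64_t timeout_ms = 1500; /* LINK_AUX_WAKE_TIMEOUT_MS */
        uint64_t time_taken_ms = 0;
        bool ok = false;

        do {
            ok = set_power_toggle_ok(); /* write D3, then D0, in the real code */
            time_taken_ms += 100;       /* stand-in for dm_get_timestamp() math */
        } while (!ok && time_taken_ms < timeout_ms);

        printf("DPCD SET_POWER %s after %llu ms\n",
               ok ? "succeeded" : "failed", (unsigned long long)time_taken_ms);
        return 0;
    }
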
index 7af153434e9e4578b7217d83db2cd0b109d70cb7..d251c3f3a7140434078a7f12443f3d049ba726bb 100644 (file)
@@ -1685,8 +1685,8 @@ bool dc_is_stream_unchanged(
        if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
                return false;
 
-       // Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks
-       if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
+       /*compare audio info*/
+       if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0)
                return false;
 
        return true;
index 4ffab7bb1098b6548f092f5f6ac6f7d664097b2a..9e79f60e6129fe1c5b7caef511383e33b4a7b88f 100644 (file)
@@ -188,6 +188,7 @@ struct dc_caps {
        bool psp_setup_panel_mode;
        bool extended_aux_timeout_support;
        bool dmcub_support;
+       bool zstate_support;
        uint32_t num_of_internal_disp;
        enum dp_protocol_version max_dp_protocol_version;
        unsigned int mall_size_per_mem_channel;
@@ -339,6 +340,7 @@ struct dc_config {
        bool is_asymmetric_memory;
        bool is_single_rank_dimm;
        bool use_pipe_ctx_sync_logic;
+       bool ignore_dpref_ss;
 };
 
 enum visual_confirm {
@@ -525,6 +527,22 @@ union dpia_debug_options {
        uint32_t raw;
 };
 
+/* AUX wake workaround options
+ * bit 0: enable/disable the workaround
+ * bit 1: use the default timeout LINK_AUX_WAKE_TIMEOUT_MS
+ * bits 2-15: reserved
+ * bits 16-31: timeout in ms
+ */
+union aux_wake_wa_options {
+       struct {
+               uint32_t enable_wa : 1;
+               uint32_t use_default_timeout : 1;
+               uint32_t rsvd: 14;
+               uint32_t timeout_ms : 16;
+       } bits;
+       uint32_t raw;
+};
+
 struct dc_debug_data {
        uint32_t ltFailCount;
        uint32_t i2cErrorCount;
@@ -703,14 +721,15 @@ struct dc_debug_options {
        bool enable_driver_sequence_debug;
        enum det_size crb_alloc_policy;
        int crb_alloc_policy_min_disp_count;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        bool disable_z10;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
        bool enable_z9_disable_interface;
        bool enable_sw_cntl_psr;
        union dpia_debug_options dpia_debug;
 #endif
        bool apply_vendor_specific_lttpr_wa;
-       bool ignore_dpref_ss;
+       bool extended_blank_optimization;
+       union aux_wake_wa_options aux_wake_wa;
        uint8_t psr_power_use_phy_fsm;
 };
 
@@ -1369,6 +1388,8 @@ struct dc_sink_init_data {
        bool converter_disable_audio;
 };
 
+bool dc_extended_blank_supported(struct dc *dc);
+
 struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
 
 /* Newer interfaces  */
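
The aux_wake_wa option packs its knobs into one 32-bit word, so a full configuration can be read or written through .raw while the individual fields stay self-describing. A standalone sketch of the same layout (bit-field ordering is implementation-defined, which is acceptable for an option word consumed only in-kernel):

    #include <stdint.h>
    #include <stdio.h>

    union aux_wake_wa_options {
        struct {
            uint32_t enable_wa : 1;
            uint32_t use_default_timeout : 1;
            uint32_t rsvd : 14;
            uint32_t timeout_ms : 16;
        } bits;
        uint32_t raw;
    };

    int main(void)
    {
        union aux_wake_wa_options wa = { .raw = 0 };

        wa.bits.enable_wa = 1;
        wa.bits.timeout_ms = 700; /* overrides the 1500 ms default */

        printf("raw = 0x%08x, timeout = %u ms\n", wa.raw, wa.bits.timeout_ms);
        return 0;
    }
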
index 99a750f561f81cb71a7092a329e9505bc4eb7653..c4168c11257c310778ef1e08e00635bed6aabffc 100644 (file)
@@ -131,6 +131,7 @@ union stream_update_flags {
                uint32_t wb_update:1;
                uint32_t dsc_changed : 1;
                uint32_t mst_bw : 1;
+               uint32_t crtc_timing_adjust : 1;
        } bits;
 
        uint32_t raw;
@@ -289,6 +290,7 @@ struct dc_stream_update {
        struct dc_3dlut *lut3d_func;
 
        struct test_pattern *pending_test_pattern;
+       struct dc_crtc_timing_adjust *crtc_timing_adjust;
 };
 
 bool dc_is_stream_unchanged(
index c3e141c19a77e4957eb3a9debdddcb4ba8ac8ed3..83fbea2df410908fe029cd0592092c5ddb393481 100644 (file)
@@ -1497,16 +1497,12 @@ void dcn10_init_hw(struct dc *dc)
                        link->link_status.link_active = true;
        }
 
-       /* Power gate DSCs */
-       if (!is_optimized_init_done) {
-               for (i = 0; i < res_pool->res_cap->num_dsc; i++)
-                       if (hws->funcs.dsc_pg_control != NULL)
-                               hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
-       }
-
        /* we want to turn off all dp displays before doing detection */
        dc_link_blank_all_dp_displays(dc);
 
+       if (hws->funcs.enable_power_gating_plane)
+               hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
        /* If taking control over from VBIOS, we may want to optimize our first
         * mode set, so we need to skip powering down pipes until we know which
         * pipes we want to use.
@@ -1559,8 +1555,6 @@ void dcn10_init_hw(struct dc *dc)
 
                REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
        }
-       if (hws->funcs.enable_power_gating_plane)
-               hws->funcs.enable_power_gating_plane(dc->hwseq, true);
 
        if (dc->clk_mgr->funcs->notify_wm_ranges)
                dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
@@ -2056,7 +2050,7 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
 {
        struct dc_context *dc_ctx = dc->ctx;
        int i, master = -1, embedded = -1;
-       struct dc_crtc_timing hw_crtc_timing[MAX_PIPES] = {0};
+       struct dc_crtc_timing *hw_crtc_timing;
        uint64_t phase[MAX_PIPES];
        uint64_t modulo[MAX_PIPES];
        unsigned int pclk;
@@ -2067,6 +2061,10 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
        uint32_t dp_ref_clk_100hz =
                dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
 
+       hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
+       if (!hw_crtc_timing)
+               return master;
+
        if (dc->config.vblank_alignment_dto_params &&
                dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
                embedded_h_total =
@@ -2130,6 +2128,8 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
                }
 
        }
+
+       kfree(hw_crtc_timing);
        return master;
 }
 
@@ -2522,14 +2522,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
        struct mpc *mpc = dc->res_pool->mpc;
        struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
 
-       if (per_pixel_alpha)
-               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
-       else
-               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
        blnd_cfg.overlap_only = false;
        blnd_cfg.global_gain = 0xff;
 
+       if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
+               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+               blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+       } else if (per_pixel_alpha) {
+               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+       } else {
+               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+       }
+
        if (pipe_ctx->plane_state->global_alpha)
                blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
        else
index ab910deed4812bbcd227b80ac266942488e4adf6..b627c41713cc2d16147f099507c08bc3ce03a18b 100644 (file)
@@ -1857,6 +1857,7 @@ void dcn20_optimize_bandwidth(
                struct dc_state *context)
 {
        struct hubbub *hubbub = dc->res_pool->hubbub;
+       int i;
 
        /* program dchubbub watermarks */
        hubbub->funcs->program_watermarks(hubbub,
@@ -1873,6 +1874,17 @@ void dcn20_optimize_bandwidth(
                        dc->clk_mgr,
                        context,
                        true);
+       if (dc_extended_blank_supported(dc) && context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
+               for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+                       struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+                       if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank
+                               && pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
+                               && pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
+                                       pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
+                                               pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
+               }
+       }
        /* increase compbuf size */
        if (hubbub->funcs->program_compbuf_size)
                hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
@@ -2332,14 +2344,18 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
        struct mpc *mpc = dc->res_pool->mpc;
        struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
 
-       if (per_pixel_alpha)
-               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
-       else
-               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
        blnd_cfg.overlap_only = false;
        blnd_cfg.global_gain = 0xff;
 
+       if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
+               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+               blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+       } else if (per_pixel_alpha) {
+               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+       } else {
+               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+       }
+
        if (pipe_ctx->plane_state->global_alpha)
                blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
        else
index d473708d53999d108304987abfa6c390870b2279..7802d603f79600d2cc53324b867dbdafc9d38319 100644 (file)
@@ -1976,7 +1976,6 @@ int dcn20_validate_apply_pipe_split_flags(
                                /*If need split for odm but 4 way split already*/
                                if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
                                                || !pipe->next_odm_pipe)) {
-                                       ASSERT(0); /* NOT expected yet */
                                        merge[i] = true; /* 4 -> 2 ODM */
                                } else if (split[i] == 0 && pipe->prev_odm_pipe) {
                                        ASSERT(0); /* NOT expected yet */
index 61273265677223494031329f18d186f7b5d424d6..3fe4bfbb98a0732fe862222704110e690b707f3c 100644 (file)
@@ -644,7 +644,7 @@ static const struct dc_debug_options debug_defaults_drv = {
                .clock_trace = true,
                .disable_pplib_clock_request = true,
                .min_disp_clk_khz = 100000,
-               .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+               .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
                .force_single_disp_pipe_split = false,
                .disable_dcc = DCC_ENABLE,
                .vsr_support = true,
index ed0a0e5fd80539e46c184c6d6de036ba2e9ae349..f61ec87638443e377922fdd7c58865f9e23c5b6d 100644 (file)
@@ -547,6 +547,9 @@ void dcn30_init_hw(struct dc *dc)
        /* we want to turn off all dp displays before doing detection */
        dc_link_blank_all_dp_displays(dc);
 
+       if (hws->funcs.enable_power_gating_plane)
+               hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
        /* If taking control over from VBIOS, we may want to optimize our first
         * mode set, so we need to skip powering down pipes until we know which
         * pipes we want to use.
@@ -624,8 +627,6 @@ void dcn30_init_hw(struct dc *dc)
 
                REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
        }
-       if (hws->funcs.enable_power_gating_plane)
-               hws->funcs.enable_power_gating_plane(dc->hwseq, true);
 
        if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
                dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
index 3e6d6ebd199ee79b7c2c72c57d6de6ca7e20641b..51c5f3685470a3aa158a122850095406a3f524fa 100644 (file)
@@ -1042,5 +1042,7 @@ void hubbub31_construct(struct dcn20_hubbub *hubbub31,
        hubbub31->detile_buf_size = det_size_kb * 1024;
        hubbub31->pixel_chunk_size = pixel_chunk_size_kb * 1024;
        hubbub31->crb_size_segs = config_return_buffer_size_kb / DCN31_CRB_SEGMENT_SIZE_KB;
+
+       hubbub31->debug_test_index_pstate = 0x6;
 }
 
index 53b792b997b7e82b14535cc138791bef13dc8a2e..8ae6117953ca001ee1bb9244b09a129458e11f18 100644 (file)
@@ -54,6 +54,13 @@ void hubp31_soft_reset(struct hubp *hubp, bool reset)
        REG_UPDATE(DCHUBP_CNTL, HUBP_SOFT_RESET, reset);
 }
 
+void hubp31_program_extended_blank(struct hubp *hubp, unsigned int min_dst_y_next_start_optimized)
+{
+       struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+       REG_SET(BLANK_OFFSET_1, 0, MIN_DST_Y_NEXT_START, min_dst_y_next_start_optimized);
+}
+
 static struct hubp_funcs dcn31_hubp_funcs = {
        .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
        .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -80,6 +87,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
        .set_unbounded_requesting = hubp31_set_unbounded_requesting,
        .hubp_soft_reset = hubp31_soft_reset,
        .hubp_in_blank = hubp1_in_blank,
+       .program_extended_blank = hubp31_program_extended_blank,
 };
 
 bool hubp31_construct(
index 4be2286809093be0ce48747d59fc3ea2edf95ae4..631d8ac63aa41ba07dbff59c1dd7dfcd0f3a9b20 100644 (file)
@@ -199,6 +199,9 @@ void dcn31_init_hw(struct dc *dc)
        /* we want to turn off all dp displays before doing detection */
        dc_link_blank_all_dp_displays(dc);
 
+       if (hws->funcs.enable_power_gating_plane)
+               hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
        /* If taking control over from VBIOS, we may want to optimize our first
         * mode set, so we need to skip powering down pipes until we know which
         * pipes we want to use.
@@ -248,8 +251,6 @@ void dcn31_init_hw(struct dc *dc)
 
                REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
        }
-       if (hws->funcs.enable_power_gating_plane)
-               hws->funcs.enable_power_gating_plane(dc->hwseq, true);
 
        if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
                dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
@@ -338,20 +339,20 @@ void dcn31_enable_power_gating_plane(
        bool enable)
 {
        bool force_on = true; /* disable power gating */
+       uint32_t org_ip_request_cntl = 0;
 
        if (enable && !hws->ctx->dc->debug.disable_hubp_power_gate)
                force_on = false;
 
+       REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+       if (org_ip_request_cntl == 0)
+               REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
        /* DCHUBP0/1/2/3/4/5 */
        REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-       REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
        REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-       REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
        /* DPP0/1/2/3/4/5 */
        REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-       REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
        REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-       REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
 
        force_on = true; /* disable power gating */
        if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate)
@@ -359,11 +360,11 @@ void dcn31_enable_power_gating_plane(
 
        /* DCS0/1/2/3/4/5 */
        REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-       REG_WAIT(DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
        REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-       REG_WAIT(DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
        REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-       REG_WAIT(DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
+
+       if (org_ip_request_cntl == 0)
+               REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
 }
 
 void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
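
The power-gating rework follows a save/enable/restore pattern: snapshot IP_REQUEST_EN, force it on only if it was off, program the domains, then put back the original value so callers that already enabled it are left undisturbed. The pattern in miniature, with stub accessors standing in for REG_GET/REG_SET:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ip_request_en; /* stand-in for DC_IP_REQUEST_CNTL */

    static uint32_t reg_get(void)       { return ip_request_en; }
    static void     reg_set(uint32_t v) { ip_request_en = v; }

    static void program_domains(void)
    {
        uint32_t org = reg_get(); /* snapshot the original state */

        if (org == 0)
            reg_set(1);           /* enable register access */

        printf("update DOMAIN*_PG_CONFIG here\n");

        if (org == 0)
            reg_set(0);           /* restore only if we enabled it */
    }

    int main(void)
    {
        program_domains();
        return 0;
    }
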
index 8afe2130d7c54ff911acf8df48e0ab5714f42ecb..e05527a3a8ba26776ae52999f46768d648c33f3c 100644 (file)
@@ -124,7 +124,6 @@ static bool optc31_enable_crtc(struct timing_generator *optc)
 static bool optc31_disable_crtc(struct timing_generator *optc)
 {
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
        /* disable otg request until end of the first line
         * in the vertical blank region
         */
@@ -138,6 +137,7 @@ static bool optc31_disable_crtc(struct timing_generator *optc)
        REG_WAIT(OTG_CLOCK_CONTROL,
                        OTG_BUSY, 0,
                        1, 100000);
+       optc1_clear_optc_underflow(optc);
 
        return true;
 }
@@ -158,6 +158,9 @@ static bool optc31_immediate_disable_crtc(struct timing_generator *optc)
                        OTG_BUSY, 0,
                        1, 100000);
 
+       /* clear any false underflow state */
+       optc1_clear_optc_underflow(optc);
+
        return true;
 }
 
index 89b7b6b7254ac8ea97d1bac7acce2fd14d44349f..63934ecf6be84207a3ba839d86564f7088604ec7 100644 (file)
@@ -2032,7 +2032,9 @@ bool dcn31_validate_bandwidth(struct dc *dc,
 
        BW_VAL_TRACE_COUNT();
 
+       DC_FP_START();
        out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+       DC_FP_END();
 
        // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
        if (pipe_cnt == 0)
@@ -2232,6 +2234,7 @@ static bool dcn31_resource_construct(
        dc->caps.extended_aux_timeout_support = true;
        dc->caps.dmcub_support = true;
        dc->caps.is_apu = true;
+       dc->caps.zstate_support = true;
 
        /* Color pipeline capabilities */
        dc->caps.color.dpp.dcn_arch = 1;
index 2f6122153bdb53f75f798e3f674519f3dfc0a018..f93af45aeab4b44a90196e45b19628cbe4b6d391 100644 (file)
@@ -722,8 +722,10 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc
 {
        int plane_count;
        int i;
+       unsigned int optimized_min_dst_y_next_start_us;
 
        plane_count = 0;
+       optimized_min_dst_y_next_start_us = 0;
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                if (context->res_ctx.pipe_ctx[i].plane_state)
                        plane_count++;
@@ -744,11 +746,22 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc
                struct dc_link *link = context->streams[0]->sink->link;
                struct dc_stream_status *stream_status = &context->stream_status[0];
 
+               if (dc_extended_blank_supported(dc)) {
+                       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                               if (context->res_ctx.pipe_ctx[i].stream == context->streams[0]
+                                       && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min == context->res_ctx.pipe_ctx[i].stream->adjust.v_total_max
+                                       && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min > context->res_ctx.pipe_ctx[i].stream->timing.v_total) {
+                                               optimized_min_dst_y_next_start_us =
+                                                       context->res_ctx.pipe_ctx[i].dlg_regs.optimized_min_dst_y_next_start_us;
+                                               break;
+                               }
+                       }
+               }
                /* zstate only supported on PWRSEQ0  and when there's <2 planes*/
                if (link->link_index != 0 || stream_status->plane_count > 1)
                        return DCN_ZSTATE_SUPPORT_DISALLOW;
 
-               if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
+               if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000)
                        return DCN_ZSTATE_SUPPORT_ALLOW;
                else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !dc->debug.disable_psr)
                        return DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
@@ -786,8 +799,6 @@ void dcn20_calculate_dlg_params(
                                                        != dm_dram_clock_change_unsupported;
        context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
 
-       context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context);
-
        context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
 
        if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
@@ -843,6 +854,7 @@ void dcn20_calculate_dlg_params(
                                &pipes[pipe_idx].pipe);
                pipe_idx++;
        }
+       context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context);
 }
 
 static void swizzle_to_dml_params(
index e0fecf127bd5a9baecbd753685f9ac1c02117df7..53d760e169e61f5763ff2e5fab4f053453675b85 100644 (file)
@@ -1055,6 +1055,7 @@ static void dml_rq_dlg_get_dlg_params(
 
        float vba__refcyc_per_req_delivery_pre_l = get_refcyc_per_req_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;  // From VBA
        float vba__refcyc_per_req_delivery_l = get_refcyc_per_req_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;  // From VBA
+       int blank_lines;
 
        memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
        memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
@@ -1080,6 +1081,18 @@ static void dml_rq_dlg_get_dlg_params(
        dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
 
        disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
+       blank_lines = (dst->vblank_end + dst->vtotal_min - dst->vblank_start - dst->vstartup_start - 1);
+       if (blank_lines < 0)
+               blank_lines = 0;
+       if (blank_lines != 0) {
+               disp_dlg_regs->optimized_min_dst_y_next_start_us =
+                       ((unsigned int) blank_lines * dst->hactive) / (unsigned int) dst->pixel_rate_mhz;
+               disp_dlg_regs->optimized_min_dst_y_next_start =
+                       (unsigned int)(((double) (dlg_vblank_start + blank_lines)) * dml_pow(2, 2));
+       } else {
+               // use unoptimized value
+               disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
+       }
        ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
 
        dml_print("DML_DLG: %s: min_ttu_vblank (us)         = %3.2f\n", __func__, min_ttu_vblank);
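
The new optimized value is plain unit arithmetic: blank_lines (lines) times hactive (pixels per line) divided by pixel_rate_mhz (pixels per microsecond) gives microseconds of extra blanking. With hypothetical timings of 600 extra blank lines, hactive = 1920, and a 148 MHz pixel rate, the budget is 600 * 1920 / 148, about 7783 us, enough to clear the 5000 us z-state gate in decide_zstate_support:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical timing: 600 extra blank lines, 1920 active pixels
         * per line, 148 MHz pixel rate (148 pixels per microsecond). */
        unsigned int blank_lines = 600, hactive = 1920, pixel_rate_mhz = 148;
        unsigned int us = blank_lines * hactive / pixel_rate_mhz;

        printf("optimized_min_dst_y_next_start_us = %u\n", us); /* 7783 */
        return 0;
    }
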
index 59f0a61c33cf9018deb5d792bd76a5d6619420e4..2df660cd8801b34dbd7b33a385571f16230b8838 100644 (file)
@@ -446,6 +446,8 @@ struct _vcs_dpi_display_dlg_regs_st {
        unsigned int refcyc_h_blank_end;
        unsigned int dlg_vblank_end;
        unsigned int min_dst_y_next_start;
+       unsigned int optimized_min_dst_y_next_start;
+       unsigned int optimized_min_dst_y_next_start_us;
        unsigned int refcyc_per_htotal;
        unsigned int refcyc_x_after_scaler;
        unsigned int dst_y_after_scaler;
index efc2339f1fa00b4e901a247043e4dab793619ef2..4385d19bc489193f5f93575092d9fa9da7d95eca 100644 (file)
@@ -864,11 +864,11 @@ static bool setup_dsc_config(
                min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
        }
 
+       is_dsc_possible = (min_slices_h <= max_slices_h);
+
        if (pic_width % min_slices_h != 0)
                min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first?
 
-       is_dsc_possible = (min_slices_h <= max_slices_h);
-
        if (min_slices_h == 0 && max_slices_h == 0)
                is_dsc_possible = false;
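
The reorder matters because the width check can clobber min_slices_h: once it is forced to 0, the old placement made min_slices_h <= max_slices_h trivially true. Evaluating is_dsc_possible first preserves the real comparison, as this miniature with hypothetical slice caps shows:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int min_slices_h = 8, max_slices_h = 4; /* hypothetical caps */
        unsigned int pic_width = 1930;                   /* not divisible by 8 */

        /* Fixed order: evaluate before min_slices_h can be zeroed. */
        bool is_dsc_possible = (min_slices_h <= max_slices_h); /* false */

        if (pic_width % min_slices_h != 0)
            min_slices_h = 0;

        /* The old order would compute 0 <= 4 here and wrongly report true. */
        printf("is_dsc_possible = %d\n", is_dsc_possible);
        return 0;
    }
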
 
index ab9939db8cea881040b1f4577c0a5730ea62e695..44f167d2584f522e10104f0a7e15de83b45efb8d 100644 (file)
@@ -33,6 +33,7 @@
 #define MAX_MTP_SLOT_COUNT 64
 #define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
 #define TRAINING_AUX_RD_INTERVAL 100 //us
+#define LINK_AUX_WAKE_TIMEOUT_MS 1500 // Timeout when trying to wake unresponsive DPRX.
 
 struct dc_link;
 struct dc_stream_state;
index e45b7993c5c574861aff875ca890ca1d3e9982e9..ad69d78c4ac347bf0c5256f02e1cf357add6ff24 100644 (file)
@@ -195,6 +195,9 @@ struct hubp_funcs {
 
        void (*hubp_set_flip_int)(struct hubp *hubp);
 
+       void (*program_extended_blank)(struct hubp *hubp,
+                       unsigned int min_dst_y_next_start_optimized);
+
        void (*hubp_wait_pipe_read_start)(struct hubp *hubp);
 };
 
index b691aa45e84fbcec96c418d9df3fa7ec327eada5..79bc207415bcb0b0a259642a20429c923d374d4d 100644 (file)
@@ -100,7 +100,8 @@ enum vsc_packet_revision {
 //PB7 = MD0
 #define MASK_VTEM_MD0__VRR_EN         0x01
 #define MASK_VTEM_MD0__M_CONST        0x02
-#define MASK_VTEM_MD0__RESERVED2      0x0C
+#define MASK_VTEM_MD0__QMS_EN         0x04
+#define MASK_VTEM_MD0__RESERVED2      0x08
 #define MASK_VTEM_MD0__FVA_FACTOR_M1  0xF0
 
 //MD1
@@ -109,7 +110,7 @@ enum vsc_packet_revision {
 //MD2
 #define MASK_VTEM_MD2__BASE_REFRESH_RATE_98  0x03
 #define MASK_VTEM_MD2__RB                    0x04
-#define MASK_VTEM_MD2__RESERVED3             0xF8
+#define MASK_VTEM_MD2__NEXT_TFR              0xF8
 
 //MD3
 #define MASK_VTEM_MD3__BASE_REFRESH_RATE_07  0xFF
index 89fbee568be4ac514d5110536928ce379ca33c8f..5504d81c77b775bb47e6cc3934effddb37cd3aba 100644 (file)
@@ -173,6 +173,17 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
 
        if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
                return false;
+       /* Don't use BACO for reset in S3.
+        * This is a workaround for some platforms
+        * where entering BACO during suspend
+        * seems to cause reboots or hangs.
+        * This might be related to the fact that BACO controls
+        * power to the whole GPU, including devices like audio and USB.
+        * Powering everything down/up may adversely affect these other
+        * devices.  Needs more investigation.
+        */
+       if (adev->in_s3)
+               return false;
 
        mutex_lock(&adev->pm.mutex);
 
@@ -500,6 +511,9 @@ int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;
 
+       if (!is_support_sw_smu(adev))
+               return -EOPNOTSUPP;
+
        mutex_lock(&adev->pm.mutex);
        ret = smu_send_hbm_bad_pages_num(smu, size);
        mutex_unlock(&adev->pm.mutex);
@@ -512,6 +526,9 @@ int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t si
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;
 
+       if (!is_support_sw_smu(adev))
+               return -EOPNOTSUPP;
+
        mutex_lock(&adev->pm.mutex);
        ret = smu_send_hbm_bad_channel_flag(smu, size);
        mutex_unlock(&adev->pm.mutex);
index 9ddd8491ff00847d5c0becbcdec1be4fa3edcb50..ede71de2343dcfba4952c9f20ad62c5614b64e12 100644 (file)
@@ -773,13 +773,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinFclkByFreq,
                                                hwmgr->display_config->num_display > 3 ?
-                                               data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk :
+                                               (data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk / 100) :
                                                min_mclk,
                                                NULL);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinSocclkByFreq,
-                                               data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk,
+                                               data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk / 100,
                                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinVcn,
@@ -792,11 +792,11 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxFclkByFreq,
-                                               data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk,
+                                               data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk / 100,
                                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxSocclkByFreq,
-                                               data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk,
+                                               data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk / 100,
                                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxVcn,
index 7bfac029e51382e6c331e5591b1c1f7505f0e9cc..b81711c4ff3358c126742c05137e40269f49b8d9 100644 (file)
@@ -991,7 +991,7 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu,
                return -EINVAL;
        }
 
-       if (sclk_min && sclk_max) {
+       if (sclk_min && sclk_max && smu_v13_0_5_clk_dpm_is_enabled(smu, SMU_SCLK)) {
                ret = smu_v13_0_5_set_soft_freq_limited_range(smu,
                                                            SMU_SCLK,
                                                            sclk_min,
index 026e4e29a0f374436a839303c019eef0541fb45a..f4df344509a87245d97f701433a4de41dc702d08 100644 (file)
@@ -214,6 +214,29 @@ int drm_of_encoder_active_endpoint(struct device_node *node,
 }
 EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
 
+static int find_panel_or_bridge(struct device_node *node,
+                               struct drm_panel **panel,
+                               struct drm_bridge **bridge)
+{
+       if (panel) {
+               *panel = of_drm_find_panel(node);
+               if (!IS_ERR(*panel))
+                       return 0;
+
+               /* Clear the panel pointer in case of error. */
+               *panel = NULL;
+       }
+
+       /* No panel found yet, check for a bridge next. */
+       if (bridge) {
+               *bridge = of_drm_find_bridge(node);
+               if (*bridge)
+                       return 0;
+       }
+
+       return -EPROBE_DEFER;
+}
+
 /**
  * drm_of_find_panel_or_bridge - return connected panel or bridge device
  * @np: device tree node containing encoder output ports
@@ -236,66 +259,44 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
                                struct drm_panel **panel,
                                struct drm_bridge **bridge)
 {
-       int ret = -EPROBE_DEFER;
-       struct device_node *remote;
+       struct device_node *node;
+       int ret;
 
        if (!panel && !bridge)
                return -EINVAL;
+
        if (panel)
                *panel = NULL;
-
-       /**
-        * Devices can also be child nodes when we also control that device
-        * through the upstream device (ie, MIPI-DCS for a MIPI-DSI device).
-        *
-        * Lookup for a child node of the given parent that isn't either port
-        * or ports.
-        */
-       for_each_available_child_of_node(np, remote) {
-               if (of_node_name_eq(remote, "port") ||
-                   of_node_name_eq(remote, "ports"))
-                       continue;
-
-               goto of_find_panel_or_bridge;
+       if (bridge)
+               *bridge = NULL;
+
+       /* Check for a graph on the device node first. */
+       if (of_graph_is_present(np)) {
+               node = of_graph_get_remote_node(np, port, endpoint);
+               if (node) {
+                       ret = find_panel_or_bridge(node, panel, bridge);
+                       of_node_put(node);
+
+                       if (!ret)
+                               return 0;
+               }
        }
 
-       /*
-        * of_graph_get_remote_node() produces a noisy error message if port
-        * node isn't found and the absence of the port is a legit case here,
-        * so at first we silently check whether graph presents in the
-        * device-tree node.
-        */
-       if (!of_graph_is_present(np))
-               return -ENODEV;
-
-       remote = of_graph_get_remote_node(np, port, endpoint);
-
-of_find_panel_or_bridge:
-       if (!remote)
-               return -ENODEV;
+       /* Otherwise check for any child node other than port/ports. */
+       for_each_available_child_of_node(np, node) {
+               if (of_node_name_eq(node, "port") ||
+                   of_node_name_eq(node, "ports"))
+                       continue;
 
-       if (panel) {
-               *panel = of_drm_find_panel(remote);
-               if (!IS_ERR(*panel))
-                       ret = 0;
-               else
-                       *panel = NULL;
-       }
-
-       /* No panel found yet, check for a bridge next. */
-       if (bridge) {
-               if (ret) {
-                       *bridge = of_drm_find_bridge(remote);
-                       if (*bridge)
-                               ret = 0;
-               } else {
-                       *bridge = NULL;
-               }
+               ret = find_panel_or_bridge(node, panel, bridge);
+               of_node_put(node);
 
+               /* Stop at the first found occurrence. */
+               if (!ret)
+                       return 0;
        }
 
-       of_node_put(remote);
-       return ret;
+       return -EPROBE_DEFER;
 }
 EXPORT_SYMBOL_GPL(drm_of_find_panel_or_bridge);
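
The refactor reduces the lookup to one rule applied in two places: try the OF graph remote node first, then any child node that is not port/ports, and report -EPROBE_DEFER when nothing is found so the probe retries once a panel or bridge driver binds. The fallback shape in a standalone sketch (the stubs return NULL on failure, whereas of_drm_find_panel() really returns an ERR_PTR):

    #include <stdio.h>

    #define EPROBE_DEFER 517

    /* Stubs standing in for of_drm_find_panel()/of_drm_find_bridge(). */
    static void *find_panel(int node)  { return node == 2 ? "panel" : NULL; }
    static void *find_bridge(int node) { (void)node; return NULL; }

    static int find_panel_or_bridge(int node, void **panel, void **bridge)
    {
        if (panel && (*panel = find_panel(node)))
            return 0;
        /* No panel found yet, check for a bridge next. */
        if (bridge && (*bridge = find_bridge(node)))
            return 0;
        return -EPROBE_DEFER;
    }

    int main(void)
    {
        void *panel = NULL, *bridge = NULL;
        int nodes[] = { 1, 2, 3 }; /* graph remote node first, then children */
        int i;

        for (i = 0; i < 3; i++) {
            if (!find_panel_or_bridge(nodes[i], &panel, &bridge)) {
                printf("found %s at node %d\n",
                       panel ? "panel" : "bridge", nodes[i]);
                return 0;
            }
        }
        printf("nothing bound yet: defer probe\n");
        return 1;
    }
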
 
index c3ea243d414da3dd5757ead748145fd6367b7d19..0c5c43852e24dcbb69548481e1317da7460caff1 100644 (file)
@@ -70,7 +70,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
         * mmap ioctl is disallowed for all discrete platforms,
         * and for all platforms with GRAPHICS_VER > 12.
         */
-       if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12)
+       if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0))
                return -EOPNOTSUPP;
 
        if (args->flags & ~(I915_MMAP_WC))
index 87428fb23d9ffa5e53d8b20a76c3d71402265a77..a2277a0d6d06fb55f129531b1e7dd660e269113f 100644 (file)
@@ -222,6 +222,7 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev)
        struct device_node *np = pdev->dev.of_node;
        const struct of_device_id *match = of_match_node(dw_hdmi_imx_dt_ids, np);
        struct imx_hdmi *hdmi;
+       int ret;
 
        hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
        if (!hdmi)
@@ -243,10 +244,15 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev)
        hdmi->bridge = of_drm_find_bridge(np);
        if (!hdmi->bridge) {
                dev_err(hdmi->dev, "Unable to find bridge\n");
+               dw_hdmi_remove(hdmi->hdmi);
                return -ENODEV;
        }
 
-       return component_add(&pdev->dev, &dw_hdmi_imx_ops);
+       ret = component_add(&pdev->dev, &dw_hdmi_imx_ops);
+       if (ret)
+               dw_hdmi_remove(hdmi->hdmi);
+
+       return ret;
 }
 
 static int dw_hdmi_imx_remove(struct platform_device *pdev)
index e5078d03020d90e84b2ca6acbc877ae52234d4ac..fb0e951248f685ed0ace2f601c1fba6478db342f 100644 (file)
@@ -572,6 +572,8 @@ static int imx_ldb_panel_ddc(struct device *dev,
                edidp = of_get_property(child, "edid", &edid_len);
                if (edidp) {
                        channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL);
+                       if (!channel->edid)
+                               return -ENOMEM;
                } else if (!channel->panel) {
                        /* fallback to display-timings node */
                        ret = of_get_drm_display_mode(child,
index 06cb1a59b9bcd6fd38f573abfdbca904f9a69cc2..63ba2ad84679183af60a635ea3f014f2f47d0bba 100644 (file)
@@ -75,8 +75,10 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
                ret = of_get_drm_display_mode(np, &imxpd->mode,
                                              &imxpd->bus_flags,
                                              OF_USE_NATIVE_MODE);
-               if (ret)
+               if (ret) {
+                       drm_mode_destroy(connector->dev, mode);
                        return ret;
+               }
 
                drm_mode_copy(mode, &imxpd->mode);
                mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
index 83c31b2ad865b19c62caab55a89130739e81f5b3..ccc4fcf7a630f49a62201927f871160803debc53 100644 (file)
@@ -1742,7 +1742,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu)
                return ERR_CAST(mmu);
 
        return msm_gem_address_space_create(mmu,
-               "gpu", 0x100000000ULL, 0x1ffffffffULL);
+               "gpu", 0x100000000ULL, SZ_4G);
 }
 
 static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
index 89cfd84760d7e1887703f0f02c43d5621637ec63..8706bcdd1472e6c8e0371238e1efb53aedc04cd0 100644 (file)
@@ -599,43 +599,91 @@ static const struct of_device_id dt_match[] = {
        {}
 };
 
-#ifdef CONFIG_PM
-static int adreno_resume(struct device *dev)
+static int adreno_runtime_resume(struct device *dev)
 {
        struct msm_gpu *gpu = dev_to_gpu(dev);
 
        return gpu->funcs->pm_resume(gpu);
 }
 
-static int active_submits(struct msm_gpu *gpu)
+static int adreno_runtime_suspend(struct device *dev)
 {
-       int active_submits;
-       mutex_lock(&gpu->active_lock);
-       active_submits = gpu->active_submits;
-       mutex_unlock(&gpu->active_lock);
-       return active_submits;
+       struct msm_gpu *gpu = dev_to_gpu(dev);
+
+       /*
+        * We should be holding a runpm ref, which will prevent
+        * runtime suspend.  In the system suspend path, we've
+        * already waited for active jobs to complete.
+        */
+       WARN_ON_ONCE(gpu->active_submits);
+
+       return gpu->funcs->pm_suspend(gpu);
+}
+
+static void suspend_scheduler(struct msm_gpu *gpu)
+{
+       int i;
+
+       /*
+        * Shut down the scheduler before we force suspend, so that
+        * suspend isn't racing with scheduler kthread feeding us
+        * more work.
+        *
+        * Note, we just want to park the thread, and let any jobs
+        * that are already on the hw queue complete normally, as
+        * opposed to the drm_sched_stop() path used for handling
+        * faulting/timed-out jobs.  We can't really cancel any jobs
+        * already on the hw queue without racing with the GPU.
+        */
+       for (i = 0; i < gpu->nr_rings; i++) {
+               struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+               kthread_park(sched->thread);
+       }
 }
 
-static int adreno_suspend(struct device *dev)
+static void resume_scheduler(struct msm_gpu *gpu)
+{
+       int i;
+
+       for (i = 0; i < gpu->nr_rings; i++) {
+               struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+               kthread_unpark(sched->thread);
+       }
+}
+
+static int adreno_system_suspend(struct device *dev)
 {
        struct msm_gpu *gpu = dev_to_gpu(dev);
-       int remaining;
+       int remaining, ret;
+
+       suspend_scheduler(gpu);
 
        remaining = wait_event_timeout(gpu->retire_event,
-                                      active_submits(gpu) == 0,
+                                      gpu->active_submits == 0,
                                       msecs_to_jiffies(1000));
        if (remaining == 0) {
                dev_err(dev, "Timeout waiting for GPU to suspend\n");
-               return -EBUSY;
+               ret = -EBUSY;
+               goto out;
        }
 
-       return gpu->funcs->pm_suspend(gpu);
+       ret = pm_runtime_force_suspend(dev);
+out:
+       if (ret)
+               resume_scheduler(gpu);
+
+       return ret;
+}
+
+static int adreno_system_resume(struct device *dev)
+{
+       resume_scheduler(dev_to_gpu(dev));
+       return pm_runtime_force_resume(dev);
 }
-#endif
 
 static const struct dev_pm_ops adreno_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
-       SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
+       SYSTEM_SLEEP_PM_OPS(adreno_system_suspend, adreno_system_resume)
+       RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
 };
 
 static struct platform_driver adreno_driver = {
index c515b7cf922c8515753174429bef61c228dc872a..c61b5b283f08d8ad4da80608cb32c50ffb9c49ab 100644 (file)
@@ -54,87 +54,87 @@ struct dpu_intr_reg {
  * When making changes be sure to sync with dpu_hw_intr_reg
  */
 static const struct dpu_intr_reg dpu_intr_set[] = {
-       {
+       [MDP_SSPP_TOP0_INTR] = {
                MDP_SSPP_TOP0_OFF+INTR_CLEAR,
                MDP_SSPP_TOP0_OFF+INTR_EN,
                MDP_SSPP_TOP0_OFF+INTR_STATUS
        },
-       {
+       [MDP_SSPP_TOP0_INTR2] = {
                MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
                MDP_SSPP_TOP0_OFF+INTR2_EN,
                MDP_SSPP_TOP0_OFF+INTR2_STATUS
        },
-       {
+       [MDP_SSPP_TOP0_HIST_INTR] = {
                MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
                MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
                MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
        },
-       {
+       [MDP_INTF0_INTR] = {
                MDP_INTF_0_OFF+INTF_INTR_CLEAR,
                MDP_INTF_0_OFF+INTF_INTR_EN,
                MDP_INTF_0_OFF+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF1_INTR] = {
                MDP_INTF_1_OFF+INTF_INTR_CLEAR,
                MDP_INTF_1_OFF+INTF_INTR_EN,
                MDP_INTF_1_OFF+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF2_INTR] = {
                MDP_INTF_2_OFF+INTF_INTR_CLEAR,
                MDP_INTF_2_OFF+INTF_INTR_EN,
                MDP_INTF_2_OFF+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF3_INTR] = {
                MDP_INTF_3_OFF+INTF_INTR_CLEAR,
                MDP_INTF_3_OFF+INTF_INTR_EN,
                MDP_INTF_3_OFF+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF4_INTR] = {
                MDP_INTF_4_OFF+INTF_INTR_CLEAR,
                MDP_INTF_4_OFF+INTF_INTR_EN,
                MDP_INTF_4_OFF+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF5_INTR] = {
                MDP_INTF_5_OFF+INTF_INTR_CLEAR,
                MDP_INTF_5_OFF+INTF_INTR_EN,
                MDP_INTF_5_OFF+INTF_INTR_STATUS
        },
-       {
+       [MDP_AD4_0_INTR] = {
                MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
                MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
                MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
        },
-       {
+       [MDP_AD4_1_INTR] = {
                MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
                MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
                MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
        },
-       {
+       [MDP_INTF0_7xxx_INTR] = {
                MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR,
                MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN,
                MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF1_7xxx_INTR] = {
                MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR,
                MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN,
                MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF2_7xxx_INTR] = {
                MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
                MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
                MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF3_7xxx_INTR] = {
                MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
                MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
                MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF4_7xxx_INTR] = {
                MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR,
                MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN,
                MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF5_7xxx_INTR] = {
                MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
                MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
                MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
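
Switching dpu_intr_set to designated initializers ties each register triplet to its enum index explicitly, so inserting or reordering enum values can no longer silently misalign the table. The idiom in general form:

    #include <stdio.h>

    enum irq_reg { REG_TOP, REG_INTF0, REG_INTF1, REG_MAX };

    struct intr_reg { unsigned int clr, en, status; };

    /* Entries stay correct even if the enum order changes later. */
    static const struct intr_reg intr_set[REG_MAX] = {
        [REG_TOP]   = { 0x000, 0x004, 0x008 },
        [REG_INTF0] = { 0x100, 0x104, 0x108 },
        [REG_INTF1] = { 0x200, 0x204, 0x208 },
    };

    int main(void)
    {
        printf("INTF1 status reg: 0x%03x\n", intr_set[REG_INTF1].status);
        return 0;
    }
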
index 1ee824600995805c96fc777f6d793440d75eef90..c478d25f7825a8a9d48293086bdbcc90731a8200 100644 (file)
@@ -98,7 +98,10 @@ static void mdp5_plane_reset(struct drm_plane *plane)
                __drm_atomic_helper_plane_destroy_state(plane->state);
 
        kfree(to_mdp5_plane_state(plane->state));
+       plane->state = NULL;
        mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+       if (!mdp5_state)
+               return;
        __drm_atomic_helper_plane_reset(plane, &mdp5_state->base);
 }
 
index 5d2ff679105869a5d626dc13a82a2caad5e91294..acfe1b31e0792eabc697d41e4d1ee9e2dfe54b7f 100644 (file)
@@ -176,6 +176,8 @@ void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
        va_list va;
 
        new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL);
+       if (!new_blk)
+               return;
 
        va_start(va, fmt);
 
index 178b774a5fbd3011a13e825442721a240e20fa04..a42732b6734906d12b161c60562509a8af63992c 100644 (file)
@@ -580,6 +580,12 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
                        dp->dp_display.connector_type, state);
        mutex_unlock(&dp->event_mutex);
 
+       /*
+        * Add the fail-safe mode outside of the event_mutex scope
+        * to avoid a potential circular lock with the drm thread.
+        */
+       dp_panel_add_fail_safe_mode(dp->dp_display.connector);
+
        /* uevent will complete connection part */
        return 0;
 };
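
The comment above describes the classic AB/BA deadlock shape: the hotplug
handler would take event_mutex and then mode_config.mutex, while the drm
thread can end up wanting the two locks in the opposite order. A minimal
sketch of the inversion (hypothetical call chain, not the driver's exact
paths):

	/* Thread A (hotplug handler), before the fix: */
	mutex_lock(&dp->event_mutex);
	mutex_lock(&connector->dev->mode_config.mutex);	/* A -> B */

	/* Thread B (drm thread): */
	mutex_lock(&connector->dev->mode_config.mutex);
	mutex_lock(&dp->event_mutex);			/* B -> A: deadlock */

Calling dp_panel_add_fail_safe_mode() only after event_mutex is dropped
means the handler never holds both locks at once, so the cycle cannot form.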
index f1418722c54928a4b8896501f60f487cceea610f..26c3653c99ec9e58646019278b8fa8fb5ddffc67 100644 (file)
@@ -151,6 +151,15 @@ static int dp_panel_update_modes(struct drm_connector *connector,
        return rc;
 }
 
+void dp_panel_add_fail_safe_mode(struct drm_connector *connector)
+{
+       /* fail safe edid */
+       mutex_lock(&connector->dev->mode_config.mutex);
+       if (drm_add_modes_noedid(connector, 640, 480))
+               drm_set_preferred_mode(connector, 640, 480);
+       mutex_unlock(&connector->dev->mode_config.mutex);
+}
+
 int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
        struct drm_connector *connector)
 {
@@ -207,16 +216,7 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
                        goto end;
                }
 
-               /* fail safe edid */
-               mutex_lock(&connector->dev->mode_config.mutex);
-               if (drm_add_modes_noedid(connector, 640, 480))
-                       drm_set_preferred_mode(connector, 640, 480);
-               mutex_unlock(&connector->dev->mode_config.mutex);
-       } else {
-               /* always add fail-safe mode as backup mode */
-               mutex_lock(&connector->dev->mode_config.mutex);
-               drm_add_modes_noedid(connector, 640, 480);
-               mutex_unlock(&connector->dev->mode_config.mutex);
+               dp_panel_add_fail_safe_mode(connector);
        }
 
        if (panel->aux_cfg_update_done) {
index 9023e5bb4b8b2c570fa12748ed3b08cc5139da0c..99739ea679a77e13ed498c542b1fe9468781d7e8 100644 (file)
@@ -59,6 +59,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel);
 int dp_panel_deinit(struct dp_panel *dp_panel);
 int dp_panel_timing_cfg(struct dp_panel *dp_panel);
 void dp_panel_dump_regs(struct dp_panel *dp_panel);
+void dp_panel_add_fail_safe_mode(struct drm_connector *connector);
 int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
                struct drm_connector *connector);
 u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
index 0c1b7dde377c949e2e4fc685716b97e7df8d49d8..9f6af0f0fe0053a049fe335ef5a32e02c716b7d3 100644 (file)
@@ -638,7 +638,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
        return connector;
 
 fail:
-       connector->funcs->destroy(msm_dsi->connector);
+       connector->funcs->destroy(connector);
        return ERR_PTR(ret);
 }
 
index affa95eb05fcda3fe114dfcb24272de6bc22d388..9c36b505daab93fd1f4476451985ee3dffb8d3e2 100644 (file)
@@ -274,7 +274,7 @@ bool msm_use_mmu(struct drm_device *dev)
        struct msm_drm_private *priv = dev->dev_private;
 
        /* a2xx comes with its own MMU */
-       return priv->is_a2xx || iommu_present(&platform_bus_type);
+       return priv->is_a2xx || device_iommu_mapped(dev->dev);
 }
 
 static int msm_init_vram(struct drm_device *dev)
index 02b9ae65a96a8a56c17632db9942a20b4ed5916b..a4f61972667b52140ad98f767b97aa55484b4eca 100644 (file)
@@ -926,6 +926,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
                                        get_pid_task(aspace->pid, PIDTYPE_PID);
                                if (task) {
                                        comm = kstrdup(task->comm, GFP_KERNEL);
+                                       put_task_struct(task);
                                } else {
                                        comm = NULL;
                                }
index e1772211b0a4b1567456e0eea30f5db88155f739..612310d5d4812eb056d6a9097acb64beb09c7cdc 100644 (file)
@@ -216,6 +216,7 @@ gm20b_pmu = {
        .intr = gt215_pmu_intr,
        .recv = gm20b_pmu_recv,
        .initmsg = gm20b_pmu_initmsg,
+       .reset = gf100_pmu_reset,
 };
 
 #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
index 6bf7fc1bd1e3b1deb1808286b748eaef39b301f5..1a6f9c3af5ecde69df97d5892d3cd9dc926ae857 100644 (file)
@@ -23,7 +23,7 @@
  */
 #include "priv.h"
 
-static void
+void
 gp102_pmu_reset(struct nvkm_pmu *pmu)
 {
        struct nvkm_device *device = pmu->subdev.device;
index ba1583bb618b2ef8379dcf5cfa99b0ccfdbd264e..94cfb1791af6ea50665b5a1a4a84b406538b096e 100644 (file)
@@ -83,6 +83,7 @@ gp10b_pmu = {
        .intr = gt215_pmu_intr,
        .recv = gm20b_pmu_recv,
        .initmsg = gm20b_pmu_initmsg,
+       .reset = gp102_pmu_reset,
 };
 
 #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
index bcaade758ff728bd528fafb262fb608f0b08a09a..21abf31f44420247efd4ffb0c95ab3a988cab7f5 100644 (file)
@@ -41,6 +41,7 @@ int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
 
 bool gf100_pmu_enabled(struct nvkm_pmu *);
 void gf100_pmu_reset(struct nvkm_pmu *);
+void gp102_pmu_reset(struct nvkm_pmu *pmu);
 
 void gk110_pmu_pgob(struct nvkm_pmu *, bool);
 
index a07ef26234e57ccf71b5bc000b15772cc1601d15..6826f4d4826a4b12520c813da4540f47f2296efd 100644 (file)
@@ -612,8 +612,10 @@ static int ili9341_dbi_probe(struct spi_device *spi, struct gpio_desc *dc,
        int ret;
 
        vcc = devm_regulator_get_optional(dev, "vcc");
-       if (IS_ERR(vcc))
+       if (IS_ERR(vcc)) {
                dev_err(dev, "get optional vcc failed\n");
+               vcc = NULL;
+       }
 
        dbidev = devm_drm_dev_alloc(dev, &ili9341_dbi_driver,
                                    struct mipi_dbi_dev, drm);
index 666223c6bec4d52da44b38d3a0263f19bae2f4a6..0a34e0ab4fe60e7d4caad5d9c0272c3b4a391d05 100644 (file)
@@ -447,8 +447,9 @@ static void ipu_di_config_clock(struct ipu_di *di,
 
                error = rate / (sig->mode.pixelclock / 1000);
 
-               dev_dbg(di->ipu->dev, "  IPU clock can give %lu with divider %u, error %d.%u%%\n",
-                       rate, div, (signed)(error - 1000) / 10, error % 10);
+               dev_dbg(di->ipu->dev, "  IPU clock can give %lu with divider %u, error %c%d.%d%%\n",
+                       rate, div, error < 1000 ? '-' : '+',
+                       abs(error - 1000) / 10, abs(error - 1000) % 10);
 
                /* Allow a 1% error */
                if (error < 1010 && error >= 990) {
index 7a674873d7947fc040b6c1513b52cba0f1fea88f..a95a7cbc4a59c8e03545543df28bec2265a55a06 100644 (file)
@@ -405,14 +405,25 @@ config HOLTEK_FF
          Say Y here if you have a Holtek On Line Grip based game controller
          and want to have force feedback support for it.
 
+config HID_VIVALDI_COMMON
+       tristate
+       help
+         ChromeOS Vivaldi HID parsing support library. This is a hidden
+         option so that drivers can use common code to parse the HID
+         descriptors for the vivaldi function row keymap.
+
 config HID_GOOGLE_HAMMER
        tristate "Google Hammer Keyboard"
+       select HID_VIVALDI_COMMON
+       select INPUT_VIVALDIFMAP
        depends on USB_HID && LEDS_CLASS && CROS_EC
        help
        Say Y here if you have a Google Hammer device.
 
 config HID_VIVALDI
        tristate "Vivaldi Keyboard"
+       select HID_VIVALDI_COMMON
+       select INPUT_VIVALDIFMAP
        depends on HID
        help
          Say Y here if you want to enable support for Vivaldi keyboards.
index d5ce8d747b140b1af99e9d3ab9418ab06383cd9e..345ac5581bd85537896f2d169f7d5d41cd072b78 100644 (file)
@@ -50,6 +50,7 @@ obj-$(CONFIG_HID_FT260)               += hid-ft260.o
 obj-$(CONFIG_HID_GEMBIRD)      += hid-gembird.o
 obj-$(CONFIG_HID_GFRM)         += hid-gfrm.o
 obj-$(CONFIG_HID_GLORIOUS)  += hid-glorious.o
+obj-$(CONFIG_HID_VIVALDI_COMMON) += hid-vivaldi-common.o
 obj-$(CONFIG_HID_GOOGLE_HAMMER)        += hid-google-hammer.o
 obj-$(CONFIG_HID_VIVALDI)      += hid-vivaldi.o
 obj-$(CONFIG_HID_GT683R)       += hid-gt683r.o
index ddbe0de177e26961c4b3599e60b4ed0dd0b0ae6b..ff40f1e55c217bf9d37cd685f9df5033112f526a 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/acpi.h>
 #include <linux/hid.h>
+#include <linux/input/vivaldi-fmap.h>
 #include <linux/leds.h>
 #include <linux/module.h>
 #include <linux/of.h>
@@ -25,6 +26,7 @@
 #include <asm/unaligned.h>
 
 #include "hid-ids.h"
+#include "hid-vivaldi-common.h"
 
 /*
  * C(hrome)B(ase)A(ttached)S(witch) - switch exported by Chrome EC and reporting
@@ -340,9 +342,9 @@ static int hammer_kbd_brightness_set_blocking(struct led_classdev *cdev,
 static int hammer_register_leds(struct hid_device *hdev)
 {
        struct hammer_kbd_leds *kbd_backlight;
-       int error;
 
-       kbd_backlight = kzalloc(sizeof(*kbd_backlight), GFP_KERNEL);
+       kbd_backlight = devm_kzalloc(&hdev->dev, sizeof(*kbd_backlight),
+                                    GFP_KERNEL);
        if (!kbd_backlight)
                return -ENOMEM;
 
@@ -356,26 +358,7 @@ static int hammer_register_leds(struct hid_device *hdev)
        /* Set backlight to 0% initially. */
        hammer_kbd_brightness_set_blocking(&kbd_backlight->cdev, 0);
 
-       error = led_classdev_register(&hdev->dev, &kbd_backlight->cdev);
-       if (error)
-               goto err_free_mem;
-
-       hid_set_drvdata(hdev, kbd_backlight);
-       return 0;
-
-err_free_mem:
-       kfree(kbd_backlight);
-       return error;
-}
-
-static void hammer_unregister_leds(struct hid_device *hdev)
-{
-       struct hammer_kbd_leds *kbd_backlight = hid_get_drvdata(hdev);
-
-       if (kbd_backlight) {
-               led_classdev_unregister(&kbd_backlight->cdev);
-               kfree(kbd_backlight);
-       }
+       return devm_led_classdev_register(&hdev->dev, &kbd_backlight->cdev);
 }
 
 #define HID_UP_GOOGLEVENDOR    0xffd10000
@@ -512,11 +495,23 @@ out:
        kfree(buf);
 }
 
+static void hammer_stop(void *hdev)
+{
+       hid_hw_stop(hdev);
+}
+
 static int hammer_probe(struct hid_device *hdev,
                        const struct hid_device_id *id)
 {
+       struct vivaldi_data *vdata;
        int error;
 
+       vdata = devm_kzalloc(&hdev->dev, sizeof(*vdata), GFP_KERNEL);
+       if (!vdata)
+               return -ENOMEM;
+
+       hid_set_drvdata(hdev, vdata);
+
        error = hid_parse(hdev);
        if (error)
                return error;
@@ -525,6 +520,10 @@ static int hammer_probe(struct hid_device *hdev,
        if (error)
                return error;
 
+       error = devm_add_action(&hdev->dev, hammer_stop, hdev);
+       if (error)
+               return error;
+
        /*
         * We always want to poll for, and handle tablet mode events from
         * devices that have folded usage, even when nobody has opened the input
@@ -577,15 +576,13 @@ static void hammer_remove(struct hid_device *hdev)
                spin_unlock_irqrestore(&cbas_ec_lock, flags);
        }
 
-       hammer_unregister_leds(hdev);
-
-       hid_hw_stop(hdev);
+       /* Unregistering LEDs and stopping the hardware is done via devm */
 }
 
 static const struct hid_device_id hammer_devices[] = {
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
-       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+       { HID_DEVICE(BUS_USB, HID_GROUP_VIVALDI,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
@@ -610,6 +607,8 @@ static struct hid_driver hammer_driver = {
        .id_table = hammer_devices,
        .probe = hammer_probe,
        .remove = hammer_remove,
+       .feature_mapping = vivaldi_feature_mapping,
+       .input_configured = vivaldi_input_configured,
        .input_mapping = hammer_input_mapping,
        .event = hammer_event,
 };
diff --git a/drivers/hid/hid-vivaldi-common.c b/drivers/hid/hid-vivaldi-common.c
new file mode 100644 (file)
index 0000000..8b3e515
--- /dev/null
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helpers for ChromeOS HID Vivaldi keyboards
+ *
+ * Copyright (C) 2022 Google, Inc
+ */
+
+#include <linux/export.h>
+#include <linux/hid.h>
+#include <linux/input/vivaldi-fmap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include "hid-vivaldi-common.h"
+
+#define MIN_FN_ROW_KEY 1
+#define MAX_FN_ROW_KEY VIVALDI_MAX_FUNCTION_ROW_KEYS
+#define HID_VD_FN_ROW_PHYSMAP 0x00000001
+#define HID_USAGE_FN_ROW_PHYSMAP (HID_UP_GOOGLEVENDOR | HID_VD_FN_ROW_PHYSMAP)
+
+/**
+ * vivaldi_feature_mapping - Fill out vivaldi keymap data exposed via HID
+ * @hdev: HID device to parse
+ * @field: HID field to parse
+ * @usage: HID usage to parse
+ *
+ * Note: this function assumes that driver data attached to @hdev contains an
+ * instance of &struct vivaldi_data at the very beginning.
+ */
+void vivaldi_feature_mapping(struct hid_device *hdev,
+                            struct hid_field *field, struct hid_usage *usage)
+{
+       struct vivaldi_data *data = hid_get_drvdata(hdev);
+       struct hid_report *report = field->report;
+       u8 *report_data, *buf;
+       u32 report_len;
+       unsigned int fn_key;
+       int ret;
+
+       if (field->logical != HID_USAGE_FN_ROW_PHYSMAP ||
+           (usage->hid & HID_USAGE_PAGE) != HID_UP_ORDINAL)
+               return;
+
+       fn_key = usage->hid & HID_USAGE;
+       if (fn_key < MIN_FN_ROW_KEY || fn_key > MAX_FN_ROW_KEY)
+               return;
+
+       if (fn_key > data->num_function_row_keys)
+               data->num_function_row_keys = fn_key;
+
+       report_data = buf = hid_alloc_report_buf(report, GFP_KERNEL);
+       if (!report_data)
+               return;
+
+       report_len = hid_report_len(report);
+       if (!report->id) {
+               /*
+                * hid_hw_raw_request() will stuff report ID (which will be 0)
+                * into the first byte of the buffer even for unnumbered
+                * reports, so we need to account for this to avoid getting
+                * -EOVERFLOW in return.
+                * Note that hid_alloc_report_buf() adds 7 bytes to the size
+                * so we can safely say that we have space for an extra byte.
+                */
+               report_len++;
+       }
+
+       ret = hid_hw_raw_request(hdev, report->id, report_data,
+                                report_len, HID_FEATURE_REPORT,
+                                HID_REQ_GET_REPORT);
+       if (ret < 0) {
+               dev_warn(&hdev->dev, "failed to fetch feature %d\n",
+                        field->report->id);
+               goto out;
+       }
+
+       if (!report->id) {
+               /*
+                * Undo the damage from hid_hw_raw_request() for unnumbered
+                * reports.
+                */
+               report_data++;
+               report_len--;
+       }
+
+       ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, report_data,
+                                  report_len, 0);
+       if (ret) {
+               dev_warn(&hdev->dev, "failed to report feature %d\n",
+                        field->report->id);
+               goto out;
+       }
+
+       data->function_row_physmap[fn_key - MIN_FN_ROW_KEY] =
+               field->value[usage->usage_index];
+
+out:
+       kfree(buf);
+}
+EXPORT_SYMBOL_GPL(vivaldi_feature_mapping);
+
+static ssize_t function_row_physmap_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       struct hid_device *hdev = to_hid_device(dev);
+       struct vivaldi_data *data = hid_get_drvdata(hdev);
+
+       return vivaldi_function_row_physmap_show(data, buf);
+}
+
+static DEVICE_ATTR_RO(function_row_physmap);
+static struct attribute *vivaldi_sysfs_attrs[] = {
+       &dev_attr_function_row_physmap.attr,
+       NULL
+};
+
+static const struct attribute_group vivaldi_attribute_group = {
+       .attrs = vivaldi_sysfs_attrs,
+};
+
+/**
+ * vivaldi_input_configured - Complete initialization of device using vivaldi map
+ * @hdev: HID device to which vivaldi attributes should be attached
+ * @hidinput: HID input device (unused)
+ */
+int vivaldi_input_configured(struct hid_device *hdev,
+                            struct hid_input *hidinput)
+{
+       struct vivaldi_data *data = hid_get_drvdata(hdev);
+
+       if (!data->num_function_row_keys)
+               return 0;
+
+       return devm_device_add_group(&hdev->dev, &vivaldi_attribute_group);
+}
+EXPORT_SYMBOL_GPL(vivaldi_input_configured);
+
+MODULE_LICENSE("GPL");
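
As the kernel-doc above notes, these helpers interpret hid_get_drvdata()
as a struct vivaldi_data, so a driver using them must place that struct at
the very beginning of its driver data. A minimal usage sketch (hypothetical
driver, mirroring the hid-google-hammer hunk earlier in this diff):

	struct my_kbd {
		struct vivaldi_data vdata;	/* must be the first member */
		/* driver-private fields follow */
	};

	static int my_kbd_probe(struct hid_device *hdev,
				const struct hid_device_id *id)
	{
		struct my_kbd *kbd;
		int error;

		kbd = devm_kzalloc(&hdev->dev, sizeof(*kbd), GFP_KERNEL);
		if (!kbd)
			return -ENOMEM;

		hid_set_drvdata(hdev, kbd);

		error = hid_parse(hdev);
		if (error)
			return error;

		return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	}

	static struct hid_driver my_kbd_driver = {
		/* ... */
		.probe			= my_kbd_probe,
		.feature_mapping	= vivaldi_feature_mapping,
		.input_configured	= vivaldi_input_configured,
	};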
diff --git a/drivers/hid/hid-vivaldi-common.h b/drivers/hid/hid-vivaldi-common.h
new file mode 100644 (file)
index 0000000..d42e82d
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _HID_VIVALDI_COMMON_H
+#define _HID_VIVALDI_COMMON_H
+
+struct hid_device;
+struct hid_field;
+struct hid_input;
+struct hid_usage;
+
+void vivaldi_feature_mapping(struct hid_device *hdev,
+                            struct hid_field *field, struct hid_usage *usage);
+
+int vivaldi_input_configured(struct hid_device *hdev,
+                            struct hid_input *hidinput);
+
+#endif /* _HID_VIVALDI_COMMON_H */
index 42ceb2058a094e00582f17edde9a73e91657b75a..3a979123e7d33f072be079425bbcfc9a619d92a2 100644 (file)
@@ -8,48 +8,11 @@
 
 #include <linux/device.h>
 #include <linux/hid.h>
+#include <linux/input/vivaldi-fmap.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/sysfs.h>
 
-#define MIN_FN_ROW_KEY 1
-#define MAX_FN_ROW_KEY 24
-#define HID_VD_FN_ROW_PHYSMAP 0x00000001
-#define HID_USAGE_FN_ROW_PHYSMAP (HID_UP_GOOGLEVENDOR | HID_VD_FN_ROW_PHYSMAP)
-
-struct vivaldi_data {
-       u32 function_row_physmap[MAX_FN_ROW_KEY - MIN_FN_ROW_KEY + 1];
-       int max_function_row_key;
-};
-
-static ssize_t function_row_physmap_show(struct device *dev,
-                                        struct device_attribute *attr,
-                                        char *buf)
-{
-       struct hid_device *hdev = to_hid_device(dev);
-       struct vivaldi_data *drvdata = hid_get_drvdata(hdev);
-       ssize_t size = 0;
-       int i;
-
-       if (!drvdata->max_function_row_key)
-               return 0;
-
-       for (i = 0; i < drvdata->max_function_row_key; i++)
-               size += sprintf(buf + size, "%02X ",
-                               drvdata->function_row_physmap[i]);
-       size += sprintf(buf + size, "\n");
-       return size;
-}
-
-static DEVICE_ATTR_RO(function_row_physmap);
-static struct attribute *sysfs_attrs[] = {
-       &dev_attr_function_row_physmap.attr,
-       NULL
-};
-
-static const struct attribute_group input_attribute_group = {
-       .attrs = sysfs_attrs
-};
+#include "hid-vivaldi-common.h"
 
 static int vivaldi_probe(struct hid_device *hdev,
                         const struct hid_device_id *id)
@@ -70,86 +33,8 @@ static int vivaldi_probe(struct hid_device *hdev,
        return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 }
 
-static void vivaldi_feature_mapping(struct hid_device *hdev,
-                                   struct hid_field *field,
-                                   struct hid_usage *usage)
-{
-       struct vivaldi_data *drvdata = hid_get_drvdata(hdev);
-       struct hid_report *report = field->report;
-       int fn_key;
-       int ret;
-       u32 report_len;
-       u8 *report_data, *buf;
-
-       if (field->logical != HID_USAGE_FN_ROW_PHYSMAP ||
-           (usage->hid & HID_USAGE_PAGE) != HID_UP_ORDINAL)
-               return;
-
-       fn_key = (usage->hid & HID_USAGE);
-       if (fn_key < MIN_FN_ROW_KEY || fn_key > MAX_FN_ROW_KEY)
-               return;
-       if (fn_key > drvdata->max_function_row_key)
-               drvdata->max_function_row_key = fn_key;
-
-       report_data = buf = hid_alloc_report_buf(report, GFP_KERNEL);
-       if (!report_data)
-               return;
-
-       report_len = hid_report_len(report);
-       if (!report->id) {
-               /*
-                * hid_hw_raw_request() will stuff report ID (which will be 0)
-                * into the first byte of the buffer even for unnumbered
-                * reports, so we need to account for this to avoid getting
-                * -EOVERFLOW in return.
-                * Note that hid_alloc_report_buf() adds 7 bytes to the size
-                * so we can safely say that we have space for an extra byte.
-                */
-               report_len++;
-       }
-
-       ret = hid_hw_raw_request(hdev, report->id, report_data,
-                                report_len, HID_FEATURE_REPORT,
-                                HID_REQ_GET_REPORT);
-       if (ret < 0) {
-               dev_warn(&hdev->dev, "failed to fetch feature %d\n",
-                        field->report->id);
-               goto out;
-       }
-
-       if (!report->id) {
-               /*
-                * Undo the damage from hid_hw_raw_request() for unnumbered
-                * reports.
-                */
-               report_data++;
-               report_len--;
-       }
-
-       ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, report_data,
-                                  report_len, 0);
-       if (ret) {
-               dev_warn(&hdev->dev, "failed to report feature %d\n",
-                        field->report->id);
-               goto out;
-       }
-
-       drvdata->function_row_physmap[fn_key - MIN_FN_ROW_KEY] =
-           field->value[usage->usage_index];
-
-out:
-       kfree(buf);
-}
-
-static int vivaldi_input_configured(struct hid_device *hdev,
-                                   struct hid_input *hidinput)
-{
-       return devm_device_add_group(&hdev->dev, &input_attribute_group);
-}
-
 static const struct hid_device_id vivaldi_table[] = {
-       { HID_DEVICE(HID_BUS_ANY, HID_GROUP_VIVALDI, HID_ANY_ID,
-                    HID_ANY_ID) },
+       { HID_DEVICE(HID_BUS_ANY, HID_GROUP_VIVALDI, HID_ANY_ID, HID_ANY_ID) },
        { }
 };
 
index 26d269ba947c85c5a87fe4d9ba1743ca6ccaa348..85a2142c9384007d2bc43a1f072ceb7b96b0c53b 100644 (file)
@@ -380,7 +380,7 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel)
         * execute:
         *
         *  (a) In the "normal (i.e., not resuming from hibernation)" path,
-        *      the full barrier in smp_store_mb() guarantees that the store
+        *      the full barrier in virt_store_mb() guarantees that the store
         *      is propagated to all CPUs before the add_channel_work work
         *      is queued.  In turn, add_channel_work is queued before the
         *      channel's ring buffer is allocated/initialized and the
@@ -392,14 +392,14 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel)
         *      recv_int_page before retrieving the channel pointer from the
         *      array of channels.
         *
-        *  (b) In the "resuming from hibernation" path, the smp_store_mb()
+        *  (b) In the "resuming from hibernation" path, the virt_store_mb()
         *      guarantees that the store is propagated to all CPUs before
         *      the VMBus connection is marked as ready for the resume event
         *      (cf. check_ready_for_resume_event()).  The interrupt handler
         *      of the VMBus driver and vmbus_chan_sched() can not run before
         *      vmbus_bus_resume() has completed execution (cf. resume_noirq).
         */
-       smp_store_mb(
+       virt_store_mb(
                vmbus_connection.channels[channel->offermsg.child_relid],
                channel);
 }
index 439f99b8b5de269bdc2c0574d4eaae2521e9d50f..3248b48f37f6128aa1dd58ff6db3582edaa5f362 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/completion.h>
+#include <linux/count_zeros.h>
 #include <linux/memory_hotplug.h>
 #include <linux/memory.h>
 #include <linux/notifier.h>
@@ -1130,6 +1131,7 @@ static void post_status(struct hv_dynmem_device *dm)
        struct dm_status status;
        unsigned long now = jiffies;
        unsigned long last_post = last_post_time;
+       unsigned long num_pages_avail, num_pages_committed;
 
        if (pressure_report_delay > 0) {
                --pressure_report_delay;
@@ -1154,16 +1156,21 @@ static void post_status(struct hv_dynmem_device *dm)
         * num_pages_onlined) as committed to the host, otherwise it can try
         * asking us to balloon them out.
         */
-       status.num_avail = si_mem_available();
-       status.num_committed = vm_memory_committed() +
+       num_pages_avail = si_mem_available();
+       num_pages_committed = vm_memory_committed() +
                dm->num_pages_ballooned +
                (dm->num_pages_added > dm->num_pages_onlined ?
                 dm->num_pages_added - dm->num_pages_onlined : 0) +
                compute_balloon_floor();
 
-       trace_balloon_status(status.num_avail, status.num_committed,
+       trace_balloon_status(num_pages_avail, num_pages_committed,
                             vm_memory_committed(), dm->num_pages_ballooned,
                             dm->num_pages_added, dm->num_pages_onlined);
+
+       /* Convert numbers of pages into numbers of HV_HYP_PAGEs. */
+       status.num_avail = num_pages_avail * NR_HV_HYP_PAGES_IN_PAGE;
+       status.num_committed = num_pages_committed * NR_HV_HYP_PAGES_IN_PAGE;
+
        /*
         * If our transaction ID is no longer current, just don't
         * send the status. This can happen if we were interrupted
@@ -1653,6 +1660,38 @@ static void disable_page_reporting(void)
        }
 }
 
+static int ballooning_enabled(void)
+{
+       /*
+        * Disable ballooning if the page size is not 4k (HV_HYP_PAGE_SIZE),
+        * since it is currently unclear whether an unballoon request can
+        * guarantee that all page ranges are guest-page-size aligned.
+        */
+       if (PAGE_SIZE != HV_HYP_PAGE_SIZE) {
+               pr_info("Ballooning disabled because page size is not 4096 bytes\n");
+               return 0;
+       }
+
+       return 1;
+}
+
+static int hot_add_enabled(void)
+{
+       /*
+        * Disable hot add on ARM64, because we currently rely on
+        * memory_add_physaddr_to_nid() to get a node id of a hot add range,
+        * however ARM64's memory_add_physaddr_to_nid() always returns 0 and
+        * DM_MEM_HOT_ADD_REQUEST doesn't have the NUMA node information for
+        * add_memory().
+        */
+       if (IS_ENABLED(CONFIG_ARM64)) {
+               pr_info("Memory hot add disabled on ARM64\n");
+               return 0;
+       }
+
+       return 1;
+}
+
 static int balloon_connect_vsp(struct hv_device *dev)
 {
        struct dm_version_request version_req;
@@ -1724,8 +1763,8 @@ static int balloon_connect_vsp(struct hv_device *dev)
         * currently still requires the bits to be set, so we have to add code
         * to fail the host's hot-add and balloon up/down requests, if any.
         */
-       cap_msg.caps.cap_bits.balloon = 1;
-       cap_msg.caps.cap_bits.hot_add = 1;
+       cap_msg.caps.cap_bits.balloon = ballooning_enabled();
+       cap_msg.caps.cap_bits.hot_add = hot_add_enabled();
 
        /*
         * Specify our alignment requirements as it relates
index c1dd21d0d7ef8d1ba96523b602ba71a48f0963b1..ae68298c0dcac59add8948c0510499b6958ea434 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/panic_notifier.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
+#include <linux/dma-map-ops.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/mshyperv.h>
 
@@ -218,6 +219,16 @@ bool hv_query_ext_cap(u64 cap_query)
 }
 EXPORT_SYMBOL_GPL(hv_query_ext_cap);
 
+void hv_setup_dma_ops(struct device *dev, bool coherent)
+{
+       /*
+        * Hyper-V does not offer a vIOMMU in the guest
+        * VM, so pass 0/NULL for the IOMMU settings
+        */
+       arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
+}
+EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
+
 bool hv_is_hibernation_supported(void)
 {
        return !hv_root_partition && acpi_sleep_state_supported(ACPI_STATE_S4);
index 71efacb90965946f12e6513efc0a9f05bd8ba89f..3d215d9dec433b33417f69de5513c1b231a27e36 100644 (file)
@@ -439,7 +439,16 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
 static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
 {
        u32 priv_read_loc = rbi->priv_read_index;
-       u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
+       u32 write_loc;
+
+       /*
+        * The Hyper-V host writes the packet data, then uses
+        * store_release() to update the write_index.  Use load_acquire()
+        * here to prevent loads of the packet data from being re-ordered
+        * before the read of the write_index and potentially getting
+        * stale data.
+        */
+       write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);
 
        if (write_loc >= priv_read_loc)
                return write_loc - priv_read_loc;
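
This is the standard release/acquire publication pattern: the producer
writes the payload first and only then publishes the new index with a
store-release; the consumer must read the index with a load-acquire before
touching the payload, otherwise payload loads may be satisfied with stale
data. A minimal sketch (hypothetical ring layout; the producer side assumes
a virt_store_release() helper mirroring the virt_load_acquire() used above):

	/* Producer (conceptually, the Hyper-V host): */
	memcpy(&ring->data[widx], pkt, len);		  /* 1: write payload */
	virt_store_release(&ring->write_index, new_widx); /* 2: publish index */

	/* Consumer (the guest, as in hv_pkt_iter_avail() above): */
	widx = virt_load_acquire(&ring->write_index);	/* pairs with release */
	/* payload reads below cannot be reordered before the acquire */
	memcpy(pkt, &ring->data[ridx], bytes_avail(widx, ridx));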
index 60ee8b329f9e374787d20b6e4f33831fdbad0426..14de17087864ac3a0d1efa475d8a44fb4d684cae 100644 (file)
@@ -77,8 +77,8 @@ static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
 
        /*
         * Hyper-V should be notified only once about a panic.  If we will be
-        * doing hyperv_report_panic_msg() later with kmsg data, don't do
-        * the notification here.
+        * doing hv_kmsg_dump() with kmsg data later, don't do the notification
+        * here.
         */
        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
            && hyperv_report_reg()) {
@@ -100,8 +100,8 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
 
        /*
         * Hyper-V should be notified only once about a panic.  If we will be
-        * doing hyperv_report_panic_msg() later with kmsg data, don't do
-        * the notification here.
+        * doing hv_kmsg_dump() with kmsg data later, don't do the notification
+        * here.
         */
        if (hyperv_report_reg())
                hyperv_report_panic(regs, val, true);
@@ -920,6 +920,21 @@ static int vmbus_probe(struct device *child_device)
        return ret;
 }
 
+/*
+ * vmbus_dma_configure -- Configure DMA coherence for VMbus device
+ */
+static int vmbus_dma_configure(struct device *child_device)
+{
+       /*
+        * On ARM64, propagate the DMA coherence setting from the top level
+        * VMbus ACPI device to the child VMbus device being added here.
+        * On x86/x64 coherence is assumed and these calls have no effect.
+        */
+       hv_setup_dma_ops(child_device,
+               device_get_dma_attr(&hv_acpi_dev->dev) == DEV_DMA_COHERENT);
+       return 0;
+}
+
 /*
  * vmbus_remove - Remove a vmbus device
  */
@@ -1040,6 +1055,7 @@ static struct bus_type  hv_bus = {
        .remove =               vmbus_remove,
        .probe =                vmbus_probe,
        .uevent =               vmbus_uevent,
+       .dma_configure =        vmbus_dma_configure,
        .dev_groups =           vmbus_dev_groups,
        .drv_groups =           vmbus_drv_groups,
        .bus_groups =           vmbus_bus_groups,
@@ -1546,14 +1562,20 @@ static int vmbus_bus_init(void)
        if (ret)
                goto err_connect;
 
+       if (hv_is_isolation_supported())
+               sysctl_record_panic_msg = 0;
+
        /*
         * Only register if the crash MSRs are available
         */
        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
                u64 hyperv_crash_ctl;
                /*
-                * Sysctl registration is not fatal, since by default
-                * reporting is enabled.
+                * Panic message recording (sysctl_record_panic_msg)
+                * is enabled by default in non-isolated guests and
+                * disabled by default in isolated guests; the panic
+                * message recording won't be available in isolated
+                * guests should the following registration fail.
                 */
                hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
                if (!hv_ctl_table_hdr)
@@ -2097,6 +2119,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
        child_device_obj->device.parent = &hv_acpi_dev->dev;
        child_device_obj->device.release = vmbus_device_release;
 
+       child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
+       child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
+       dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
+
        /*
         * Register with the LDM. This will kick off the driver/device
         * binding...which will eventually call vmbus_match() and vmbus_probe()
@@ -2122,9 +2148,6 @@ int vmbus_device_register(struct hv_device *child_device_obj)
        }
        hv_debug_add_dev_dir(child_device_obj);
 
-       child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
-       child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
-       dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
        return 0;
 
 err_kset_unregister:
@@ -2428,6 +2451,21 @@ static int vmbus_acpi_add(struct acpi_device *device)
 
        hv_acpi_dev = device;
 
+       /*
+        * Older versions of Hyper-V for ARM64 fail to include the _CCA
+        * method on the top level VMbus device in the DSDT. But devices
+        * are hardware coherent in all current Hyper-V use cases, so fix
+        * up the ACPI device to behave as if _CCA is present and indicates
+        * hardware coherence.
+        */
+       ACPI_COMPANION_SET(&device->dev, device);
+       if (IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED) &&
+           device_get_dma_attr(&device->dev) == DEV_DMA_NOT_SUPPORTED) {
+               pr_info("No ACPI _CCA found; assuming coherent device I/O\n");
+               device->flags.cca_seen = true;
+               device->flags.coherent_dma = true;
+       }
+
        result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
                                        vmbus_walk_resources, NULL);
 
@@ -2780,10 +2818,15 @@ static void __exit vmbus_exit(void)
        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
                kmsg_dump_unregister(&hv_kmsg_dumper);
                unregister_die_notifier(&hyperv_die_block);
-               atomic_notifier_chain_unregister(&panic_notifier_list,
-                                                &hyperv_panic_block);
        }
 
+       /*
+        * The panic notifier is always registered, hence we should
+        * also unconditionally unregister it here as well.
+        */
+       atomic_notifier_chain_unregister(&panic_notifier_list,
+                                        &hyperv_panic_block);
+
        free_page((unsigned long)hv_panic_page);
        unregister_sysctl_table(hv_ctl_table_hdr);
        hv_ctl_table_hdr = NULL;
index 27f969b3dc072b1946775e5209e3e1b2f80bcfd4..e9e2db68b9fb62f1bc3e47bf0770827667cc5063 100644 (file)
@@ -179,6 +179,12 @@ struct imx_i2c_hwdata {
        unsigned int            ndivs;
        unsigned int            i2sr_clr_opcode;
        unsigned int            i2cr_ien_opcode;
+       /*
+        * Errata ERR007805 or e7805:
+        * I2C: When the I2C clock speed is configured for 400 kHz,
+        * the SCL low period violates the I2C spec of 1.3 uS min.
+        */
+       bool                    has_err007805;
 };
 
 struct imx_i2c_dma {
@@ -240,6 +246,16 @@ static const struct imx_i2c_hwdata imx21_i2c_hwdata = {
 
 };
 
+static const struct imx_i2c_hwdata imx6_i2c_hwdata = {
+       .devtype                = IMX21_I2C,
+       .regshift               = IMX_I2C_REGSHIFT,
+       .clk_div                = imx_i2c_clk_div,
+       .ndivs                  = ARRAY_SIZE(imx_i2c_clk_div),
+       .i2sr_clr_opcode        = I2SR_CLR_OPCODE_W0C,
+       .i2cr_ien_opcode        = I2CR_IEN_OPCODE_1,
+       .has_err007805          = true,
+};
+
 static struct imx_i2c_hwdata vf610_i2c_hwdata = {
        .devtype                = VF610_I2C,
        .regshift               = VF610_I2C_REGSHIFT,
@@ -266,6 +282,16 @@ MODULE_DEVICE_TABLE(platform, imx_i2c_devtype);
 static const struct of_device_id i2c_imx_dt_ids[] = {
        { .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, },
        { .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, },
+       { .compatible = "fsl,imx6q-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx6sl-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx6sll-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx6sx-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx6ul-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx7s-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx8mm-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx8mn-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx8mp-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx8mq-i2c", .data = &imx6_i2c_hwdata, },
        { .compatible = "fsl,vf610-i2c", .data = &vf610_i2c_hwdata, },
        { /* sentinel */ }
 };
@@ -551,6 +577,13 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
        unsigned int div;
        int i;
 
+       if (i2c_imx->hwdata->has_err007805 && i2c_imx->bitrate > 384000) {
+               dev_dbg(&i2c_imx->adapter.dev,
+                       "SoC errata ERR007805 or e7805 applies, bus frequency limited from %d Hz to 384000 Hz.\n",
+                       i2c_imx->bitrate);
+               i2c_imx->bitrate = 384000;
+       }
+
        /* Divider value calculation */
        if (i2c_imx->cur_clk == i2c_clk_rate)
                return;
index f4820fd3dc13ebe7f23547ee039f049117064b89..c0364314877ec6503e410b3e8dc01472c3404849 100644 (file)
 #define ISMT_SPGT_SPD_MASK     0xc0000000      /* SMBus Speed mask */
 #define ISMT_SPGT_SPD_80K      0x00            /* 80 kHz */
 #define ISMT_SPGT_SPD_100K     (0x1 << 30)     /* 100 kHz */
-#define ISMT_SPGT_SPD_400K     (0x2 << 30)     /* 400 kHz */
-#define ISMT_SPGT_SPD_1M       (0x3 << 30)     /* 1 MHz */
+#define ISMT_SPGT_SPD_400K     (0x2U << 30)    /* 400 kHz */
+#define ISMT_SPGT_SPD_1M       (0x3U << 30)    /* 1 MHz */
 
 
 /* MSI Control Register (MSICTL) bit definitions */
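
Background on the U suffix: both speed values place a bit in position 31 of
a 32-bit int, and left-shifting a signed literal so the result exceeds
INT_MAX is undefined behaviour in C; the U suffix makes the shift operate on
unsigned int, where the result is well-defined. A stand-alone illustration
(not kernel code):

	unsigned int spd_1m = 0x3U << 30;	/* well-defined: 0xC0000000 */
	/* int bad = 0x3 << 30; */		/* UB: overflows signed int */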
index 7728c8460dc0fd3744a46ecaf0fb088c625a1481..9028ffb58cc079697f6acaed7714cba5cf0bbf27 100644 (file)
@@ -137,6 +137,12 @@ static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter,
 
                TXFIFO_WR(smbus, msg->buf[msg->len-1] |
                          (stop ? MTXFIFO_STOP : 0));
+
+               if (stop) {
+                       err = pasemi_smb_waitready(smbus);
+                       if (err)
+                               goto reset_out;
+               }
        }
 
        return 0;
index fc1dcc19f2a170f16636e96bea37c24cfbdda9c0..5b920f0fc7dd79c5033d3182cbbc27f46e0bdb64 100644 (file)
@@ -843,10 +843,8 @@ static int geni_i2c_probe(struct platform_device *pdev)
                /* FIFO is disabled, so we can only use GPI DMA */
                gi2c->gpi_mode = true;
                ret = setup_gpi_dma(gi2c);
-               if (ret) {
-                       dev_err(dev, "Failed to setup GPI DMA mode:%d ret\n", ret);
-                       return ret;
-               }
+               if (ret)
+                       return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n");
 
                dev_dbg(dev, "Using GPI DMA mode for I2C\n");
        } else {
index cf5d049342ead2ca707475196083b813b0ccb5af..ab0adaa130dae31da7658798481694628af270af 100644 (file)
@@ -557,7 +557,7 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo
                                .addr = umsg.addr,
                                .flags = umsg.flags,
                                .len = umsg.len,
-                               .buf = compat_ptr(umsg.buf)
+                               .buf = (__force __u8 *)compat_ptr(umsg.buf),
                        };
                }
 
@@ -668,16 +668,21 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
        i2c_dev->dev.class = i2c_dev_class;
        i2c_dev->dev.parent = &adap->dev;
        i2c_dev->dev.release = i2cdev_dev_release;
-       dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
+
+       res = dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
+       if (res)
+               goto err_put_i2c_dev;
 
        res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev);
-       if (res) {
-               put_i2c_dev(i2c_dev, false);
-               return res;
-       }
+       if (res)
+               goto err_put_i2c_dev;
 
        pr_debug("adapter [%s] registered as minor %d\n", adap->name, adap->nr);
        return 0;
+
+err_put_i2c_dev:
+       put_i2c_dev(i2c_dev, false);
+       return res;
 }
 
 static int i2cdev_detach_adapter(struct device *dev, void *dummy)
index 35f0d5e7533d60b6038d033095053bdf18e61aa7..1c107d6d03b990d9ea0a33dc4bb1e634b073ed87 100644 (file)
@@ -2824,6 +2824,7 @@ static int cm_dreq_handler(struct cm_work *work)
        switch (cm_id_priv->id.state) {
        case IB_CM_REP_SENT:
        case IB_CM_DREQ_SENT:
+       case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->msg);
                break;
        case IB_CM_ESTABLISHED:
@@ -2831,8 +2832,6 @@ static int cm_dreq_handler(struct cm_work *work)
                    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
                        ib_cancel_mad(cm_id_priv->msg);
                break;
-       case IB_CM_MRA_REP_RCVD:
-               break;
        case IB_CM_TIMEWAIT:
                atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
                                                     [CM_DREQ_COUNTER]);
index 876cc78a22cca16e2ff184fc73daf0a1e89dc9e0..7333646021bb809d4b60908456eb9f01c5f1ab4f 100644 (file)
@@ -80,6 +80,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
        unsigned long flags;
        struct list_head del_list;
 
+       /* Prevent freeing of mm until we are completely finished. */
+       mmgrab(handler->mn.mm);
+
        /* Unregister first so we don't get any more notifications. */
        mmu_notifier_unregister(&handler->mn, handler->mn.mm);
 
@@ -102,6 +105,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
 
        do_remove(handler, &del_list);
 
+       /* Now the mm may be freed. */
+       mmdrop(handler->mn.mm);
+
        kfree(handler);
 }
 
index 956f8e875daa517d84314ced52ba3e58aa06b85d..32ef67e9a6a7295330b5beaeab832574abc307c9 100644 (file)
@@ -574,8 +574,10 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
                spin_lock_irq(&ent->lock);
                if (ent->disabled)
                        goto out;
-               if (need_delay)
+               if (need_delay) {
                        queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
+                       goto out;
+               }
                remove_cache_mr_locked(ent);
                queue_adjust_cache_locked(ent);
        }
@@ -625,6 +627,7 @@ static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
        struct mlx5_cache_ent *ent = mr->cache_ent;
 
+       WRITE_ONCE(dev->cache.last_add, jiffies);
        spin_lock_irq(&ent->lock);
        list_add_tail(&mr->list, &ent->head);
        ent->available_mrs++;
index ae50b56e891321fb8bf98b7c2b28bd626b5169c5..8ef112f883a772db51f1b218990408db3b934b1a 100644 (file)
@@ -3190,7 +3190,11 @@ serr_no_r_lock:
        spin_lock_irqsave(&sqp->s_lock, flags);
        rvt_send_complete(sqp, wqe, send_status);
        if (sqp->ibqp.qp_type == IB_QPT_RC) {
-               int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+               int lastwqe;
+
+               spin_lock(&sqp->r_lock);
+               lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+               spin_unlock(&sqp->r_lock);
 
                sqp->s_flags &= ~RVT_S_BUSY;
                spin_unlock_irqrestore(&sqp->s_lock, flags);
index 5baebf62df3305ae869c1ab57cfdca96c2f8d7d4..e2752f7364bcfe8e687710926846fe175d86e4da 100644 (file)
@@ -77,6 +77,13 @@ config INPUT_MATRIXKMAP
          To compile this driver as a module, choose M here: the
          module will be called matrix-keymap.
 
+config INPUT_VIVALDIFMAP
+       tristate
+       help
+         ChromeOS Vivaldi keymap support library. This is a hidden
+         option so that drivers can use common code to parse and
+         expose the vivaldi function row keymap.
+
 comment "Userland interfaces"
 
 config INPUT_MOUSEDEV
index 037cc595106c0f6b2d88557b0106660910e642e4..2266c7d010efca94d348881dcb40233422508e51 100644 (file)
@@ -12,6 +12,7 @@ input-core-y += touchscreen.o
 obj-$(CONFIG_INPUT_FF_MEMLESS) += ff-memless.o
 obj-$(CONFIG_INPUT_SPARSEKMAP) += sparse-keymap.o
 obj-$(CONFIG_INPUT_MATRIXKMAP) += matrix-keymap.o
+obj-$(CONFIG_INPUT_VIVALDIFMAP)        += vivaldi-fmap.o
 
 obj-$(CONFIG_INPUT_LEDS)       += input-leds.o
 obj-$(CONFIG_INPUT_MOUSEDEV)   += mousedev.o
index c3139bc2aa0db106f8a63cced11051a98c0ecbc5..e5a668ce884d6d7626ea1984ccd79decc40aceb6 100644 (file)
@@ -47,6 +47,17 @@ static DEFINE_MUTEX(input_mutex);
 
 static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
 
+static const unsigned int input_max_code[EV_CNT] = {
+       [EV_KEY] = KEY_MAX,
+       [EV_REL] = REL_MAX,
+       [EV_ABS] = ABS_MAX,
+       [EV_MSC] = MSC_MAX,
+       [EV_SW] = SW_MAX,
+       [EV_LED] = LED_MAX,
+       [EV_SND] = SND_MAX,
+       [EV_FF] = FF_MAX,
+};
+
 static inline int is_event_supported(unsigned int code,
                                     unsigned long *bm, unsigned int max)
 {
@@ -511,6 +522,9 @@ void input_set_abs_params(struct input_dev *dev, unsigned int axis,
 {
        struct input_absinfo *absinfo;
 
+       __set_bit(EV_ABS, dev->evbit);
+       __set_bit(axis, dev->absbit);
+
        input_alloc_absinfo(dev);
        if (!dev->absinfo)
                return;
@@ -520,12 +534,45 @@ void input_set_abs_params(struct input_dev *dev, unsigned int axis,
        absinfo->maximum = max;
        absinfo->fuzz = fuzz;
        absinfo->flat = flat;
-
-       __set_bit(EV_ABS, dev->evbit);
-       __set_bit(axis, dev->absbit);
 }
 EXPORT_SYMBOL(input_set_abs_params);
 
+/**
+ * input_copy_abs - Copy absinfo from one input_dev to another
+ * @dst: Destination input device to copy the abs settings to
+ * @dst_axis: ABS_* value selecting the destination axis
+ * @src: Source input device to copy the abs settings from
+ * @src_axis: ABS_* value selecting the source axis
+ *
+ * Set absinfo for the selected destination axis by copying it from
+ * the specified source input device's source axis.
+ * This is useful, e.g., to set up a pen/stylus input device for combined
+ * touchscreen/pen hardware where the pen uses the same coordinates as
+ * the touchscreen.
+ */
+void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
+                   const struct input_dev *src, unsigned int src_axis)
+{
+       /* src must have EV_ABS and src_axis set */
+       if (WARN_ON(!(test_bit(EV_ABS, src->evbit) &&
+                     test_bit(src_axis, src->absbit))))
+               return;
+
+       /*
+        * input_alloc_absinfo() may have failed for the source. Our caller is
+        * expected to catch this when registering the input devices, which may
+        * happen after the input_copy_abs() call.
+        */
+       if (!src->absinfo)
+               return;
+
+       input_set_capability(dst, EV_ABS, dst_axis);
+       if (!dst->absinfo)
+               return;
+
+       dst->absinfo[dst_axis] = src->absinfo[src_axis];
+}
+EXPORT_SYMBOL(input_copy_abs);
 
 /**
  * input_grab_device - grabs device for exclusive use
@@ -2074,6 +2121,14 @@ EXPORT_SYMBOL(input_get_timestamp);
  */
 void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
 {
+       if (type < EV_CNT && input_max_code[type] &&
+           code > input_max_code[type]) {
+               pr_err("%s: invalid code %u for type %u\n", __func__, code,
+                      type);
+               dump_stack();
+               return;
+       }
+
        switch (type) {
        case EV_KEY:
                __set_bit(code, dev->keybit);
@@ -2085,9 +2140,6 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
 
        case EV_ABS:
                input_alloc_absinfo(dev);
-               if (!dev->absinfo)
-                       return;
-
                __set_bit(code, dev->absbit);
                break;
 
@@ -2285,12 +2337,6 @@ int input_register_device(struct input_dev *dev)
        /* KEY_RESERVED is not supposed to be transmitted to userspace. */
        __clear_bit(KEY_RESERVED, dev->keybit);
 
-       /* Buttonpads should not map BTN_RIGHT and/or BTN_MIDDLE. */
-       if (test_bit(INPUT_PROP_BUTTONPAD, dev->propbit)) {
-               __clear_bit(BTN_RIGHT, dev->keybit);
-               __clear_bit(BTN_MIDDLE, dev->keybit);
-       }
-
        /* Make sure that bitmasks not mentioned in dev->evbit are clean. */
        input_cleanse_bitmasks(dev);
 
index 592c95b87f54786e5726ec9d7350dc2dae283312..e10d57bf1180c7c2eb06824ac31d71239a14ae88 100644 (file)
@@ -123,7 +123,7 @@ static void adi_read_packet(struct adi_port *port)
 {
        struct adi *adi = port->adi;
        struct gameport *gameport = port->gameport;
-       unsigned char u, v, w, x, z;
+       unsigned char u, v, w, x;
        int t[2], s[2], i;
        unsigned long flags;
 
@@ -136,7 +136,7 @@ static void adi_read_packet(struct adi_port *port)
        local_irq_save(flags);
 
        gameport_trigger(gameport);
-       v = z = gameport_read(gameport);
+       v = gameport_read(gameport);
 
        do {
                u = v;
index 4c914f75a9027db503db69d9a04443059c8b5712..18190b529bca3fdf81b6b9e8aee9e11d0ec7da16 100644 (file)
@@ -131,7 +131,7 @@ static const struct xpad_device {
        { 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE },
        { 0x045e, 0x02ea, "Microsoft X-Box One S pad", 0, XTYPE_XBOXONE },
        { 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
-       { 0x045e, 0x0b12, "Microsoft Xbox One X pad", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
+       { 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
        { 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
        { 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 },
        { 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 },
index 9417ee0b1eff8fd6c45883c89806eb2e9297c337..4ea79db8f134becd2a0e6938a2e27ea951e59927 100644 (file)
@@ -103,6 +103,7 @@ config KEYBOARD_ATKBD
        select SERIO_LIBPS2
        select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
        select SERIO_GSCPS2 if GSC
+       select INPUT_VIVALDIFMAP
        help
          Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
          you'll need this, unless you have a different type keyboard (USB, ADB
@@ -749,6 +750,7 @@ config KEYBOARD_XTKBD
 config KEYBOARD_CROS_EC
        tristate "ChromeOS EC keyboard"
        select INPUT_MATRIXKMAP
+       select INPUT_VIVALDIFMAP
        depends on CROS_EC
        help
          Say Y here to enable the matrix keyboard used by ChromeOS devices
@@ -779,6 +781,18 @@ config KEYBOARD_BCM
          To compile this driver as a module, choose M here: the
          module will be called bcm-keypad.
 
+config KEYBOARD_MT6779
+       tristate "MediaTek Keypad Support"
+       depends on ARCH_MEDIATEK || COMPILE_TEST
+       select REGMAP_MMIO
+       select INPUT_MATRIXKMAP
+       help
+         Say Y here if you want to use the keypad on MediaTek SoCs.
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called mt6779-keypad.
+
 config KEYBOARD_MTK_PMIC
        tristate "MediaTek PMIC keys support"
        depends on MFD_MT6397
index e3c8648f834ef480335b918a22413f17fa6aa160..721936e9029002c5fe341b55c6647bd1f4e4efad 100644 (file)
@@ -44,6 +44,7 @@ obj-$(CONFIG_KEYBOARD_MATRIX)         += matrix_keypad.o
 obj-$(CONFIG_KEYBOARD_MAX7359)         += max7359_keypad.o
 obj-$(CONFIG_KEYBOARD_MCS)             += mcs_touchkey.o
 obj-$(CONFIG_KEYBOARD_MPR121)          += mpr121_touchkey.o
+obj-$(CONFIG_KEYBOARD_MT6779)          += mt6779-keypad.o
 obj-$(CONFIG_KEYBOARD_MTK_PMIC)        += mtk-pmic-keys.o
 obj-$(CONFIG_KEYBOARD_NEWTON)          += newtonkbd.o
 obj-$(CONFIG_KEYBOARD_NOMADIK)         += nomadik-ske-keypad.o
index fbdef95291e90d70e9127b83701994d8a0850190..d4131236d18c5442d000e8bb568e8c9161b49a7c 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/input.h>
+#include <linux/input/vivaldi-fmap.h>
 #include <linux/serio.h>
 #include <linux/workqueue.h>
 #include <linux/libps2.h>
@@ -64,8 +65,6 @@ static bool atkbd_terminal;
 module_param_named(terminal, atkbd_terminal, bool, 0);
 MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard connected via AT/PS2");
 
-#define MAX_FUNCTION_ROW_KEYS  24
-
 #define SCANCODE(keymap)       ((keymap >> 16) & 0xFFFF)
 #define KEYCODE(keymap)                (keymap & 0xFFFF)
 
@@ -237,8 +236,7 @@ struct atkbd {
        /* Serializes reconnect(), attr->set() and event work */
        struct mutex mutex;
 
-       u32 function_row_physmap[MAX_FUNCTION_ROW_KEYS];
-       int num_function_row_keys;
+       struct vivaldi_data vdata;
 };
 
 /*
@@ -308,17 +306,7 @@ static struct attribute *atkbd_attributes[] = {
 
 static ssize_t atkbd_show_function_row_physmap(struct atkbd *atkbd, char *buf)
 {
-       ssize_t size = 0;
-       int i;
-
-       if (!atkbd->num_function_row_keys)
-               return 0;
-
-       for (i = 0; i < atkbd->num_function_row_keys; i++)
-               size += scnprintf(buf + size, PAGE_SIZE - size, "%02X ",
-                                 atkbd->function_row_physmap[i]);
-       size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
-       return size;
+       return vivaldi_function_row_physmap_show(&atkbd->vdata, buf);
 }
 
 static umode_t atkbd_attr_is_visible(struct kobject *kobj,
@@ -329,7 +317,7 @@ static umode_t atkbd_attr_is_visible(struct kobject *kobj,
        struct atkbd *atkbd = serio_get_drvdata(serio);
 
        if (attr == &atkbd_attr_function_row_physmap.attr &&
-           !atkbd->num_function_row_keys)
+           !atkbd->vdata.num_function_row_keys)
                return 0;
 
        return attr->mode;
@@ -1206,10 +1194,11 @@ static void atkbd_parse_fwnode_data(struct serio *serio)
 
        /* Parse "function-row-physmap" property */
        n = device_property_count_u32(dev, "function-row-physmap");
-       if (n > 0 && n <= MAX_FUNCTION_ROW_KEYS &&
+       if (n > 0 && n <= VIVALDI_MAX_FUNCTION_ROW_KEYS &&
            !device_property_read_u32_array(dev, "function-row-physmap",
-                                           atkbd->function_row_physmap, n)) {
-               atkbd->num_function_row_keys = n;
+                                           atkbd->vdata.function_row_physmap,
+                                           n)) {
+               atkbd->vdata.num_function_row_keys = n;
                dev_dbg(dev, "FW reported %d function-row key locations\n", n);
        }
 }
index fc02c540636e7204b35d7366ed1488db0a47f24f..6534dfca60b4d06e29f0c446df5f143a1149c7e6 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/bitops.h>
 #include <linux/i2c.h>
 #include <linux/input.h>
+#include <linux/input/vivaldi-fmap.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/notifier.h>
@@ -27,8 +28,6 @@
 
 #include <asm/unaligned.h>
 
-#define MAX_NUM_TOP_ROW_KEYS   15
-
 /**
  * struct cros_ec_keyb - Structure representing EC keyboard device
  *
@@ -44,9 +43,7 @@
  * @idev: The input device for the matrix keys.
  * @bs_idev: The input device for non-matrix buttons and switches (or NULL).
  * @notifier: interrupt event notifier for transport devices
- * @function_row_physmap: An array of the encoded rows/columns for the top
- *                        row function keys, in an order from left to right
- * @num_function_row_keys: The number of top row keys in a custom keyboard
+ * @vdata: vivaldi function row data
  */
 struct cros_ec_keyb {
        unsigned int rows;
@@ -64,8 +61,7 @@ struct cros_ec_keyb {
        struct input_dev *bs_idev;
        struct notifier_block notifier;
 
-       u16 function_row_physmap[MAX_NUM_TOP_ROW_KEYS];
-       size_t num_function_row_keys;
+       struct vivaldi_data vdata;
 };
 
 /**
@@ -537,9 +533,9 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
        int err;
        struct property *prop;
        const __be32 *p;
-       u16 *physmap;
+       u32 *physmap;
        u32 key_pos;
-       int row, col;
+       unsigned int row, col, scancode, n_physmap;
 
        err = matrix_keypad_parse_properties(dev, &ckdev->rows, &ckdev->cols);
        if (err)
@@ -591,20 +587,21 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
        ckdev->idev = idev;
        cros_ec_keyb_compute_valid_keys(ckdev);
 
-       physmap = ckdev->function_row_physmap;
+       physmap = ckdev->vdata.function_row_physmap;
+       n_physmap = 0;
        of_property_for_each_u32(dev->of_node, "function-row-physmap",
                                 prop, p, key_pos) {
-               if (ckdev->num_function_row_keys == MAX_NUM_TOP_ROW_KEYS) {
+               if (n_physmap == VIVALDI_MAX_FUNCTION_ROW_KEYS) {
                        dev_warn(dev, "Only support up to %d top row keys\n",
-                                MAX_NUM_TOP_ROW_KEYS);
+                                VIVALDI_MAX_FUNCTION_ROW_KEYS);
                        break;
                }
                row = KEY_ROW(key_pos);
                col = KEY_COL(key_pos);
-               *physmap = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
-               physmap++;
-               ckdev->num_function_row_keys++;
+               scancode = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
+               physmap[n_physmap++] = scancode;
        }
+       ckdev->vdata.num_function_row_keys = n_physmap;
 
        err = input_register_device(ckdev->idev);
        if (err) {
@@ -619,18 +616,10 @@ static ssize_t function_row_physmap_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
 {
-       ssize_t size = 0;
-       int i;
-       struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
-       u16 *physmap = ckdev->function_row_physmap;
-
-       for (i = 0; i < ckdev->num_function_row_keys; i++)
-               size += scnprintf(buf + size, PAGE_SIZE - size,
-                                 "%s%02X", size ? " " : "", physmap[i]);
-       if (size)
-               size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+       const struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
+       const struct vivaldi_data *data = &ckdev->vdata;
 
-       return size;
+       return vivaldi_function_row_physmap_show(data, buf);
 }
 
 static DEVICE_ATTR_RO(function_row_physmap);
@@ -648,7 +637,7 @@ static umode_t cros_ec_keyb_attr_is_visible(struct kobject *kobj,
        struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
 
        if (attr == &dev_attr_function_row_physmap.attr &&
-           !ckdev->num_function_row_keys)
+           !ckdev->vdata.num_function_row_keys)
                return 0;
 
        return attr->mode;
diff --git a/drivers/input/keyboard/mt6779-keypad.c b/drivers/input/keyboard/mt6779-keypad.c
new file mode 100644 (file)
index 0000000..0dbbddc
--- /dev/null
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ * Author Fengping Yu <fengping.yu@mediatek.com>
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define MTK_KPD_NAME           "mt6779-keypad"
+#define MTK_KPD_MEM            0x0004
+#define MTK_KPD_DEBOUNCE       0x0018
+#define MTK_KPD_DEBOUNCE_MASK  GENMASK(13, 0)
+#define MTK_KPD_DEBOUNCE_MAX_MS        256
+#define MTK_KPD_NUM_MEMS       5
+#define MTK_KPD_NUM_BITS       136     /* 4*32+8, MEM5 only uses 8 bits */
+
+struct mt6779_keypad {
+       struct regmap *regmap;
+       struct input_dev *input_dev;
+       struct clk *clk;
+       void __iomem *base;
+       u32 n_rows;
+       u32 n_cols;
+       DECLARE_BITMAP(keymap_state, MTK_KPD_NUM_BITS);
+};
+
+static const struct regmap_config mt6779_keypad_regmap_cfg = {
+       .reg_bits = 32,
+       .val_bits = 32,
+       .reg_stride = sizeof(u32),
+       .max_register = 36,
+};
+
+static irqreturn_t mt6779_keypad_irq_handler(int irq, void *dev_id)
+{
+       struct mt6779_keypad *keypad = dev_id;
+       const unsigned short *keycode = keypad->input_dev->keycode;
+       DECLARE_BITMAP(new_state, MTK_KPD_NUM_BITS);
+       DECLARE_BITMAP(change, MTK_KPD_NUM_BITS);
+       unsigned int bit_nr;
+       unsigned int row, col;
+       unsigned int scancode;
+       unsigned int row_shift = get_count_order(keypad->n_cols);
+       bool pressed;
+
+       regmap_bulk_read(keypad->regmap, MTK_KPD_MEM,
+                        new_state, MTK_KPD_NUM_MEMS);
+
+       bitmap_xor(change, new_state, keypad->keymap_state, MTK_KPD_NUM_BITS);
+
+       for_each_set_bit(bit_nr, change, MTK_KPD_NUM_BITS) {
+               /*
+                * Registers are 32bits, but only bits [15:0] are used to
+                * indicate key status.
+                */
+               if (bit_nr % 32 >= 16)
+                       continue;
+
+               row = bit_nr / 32;
+               col = bit_nr % 32;
+               scancode = MATRIX_SCAN_CODE(row, col, row_shift);
+               /* 1: not pressed, 0: pressed */
+               pressed = !test_bit(bit_nr, new_state);
+               dev_dbg(&keypad->input_dev->dev, "%s\n",
+                       pressed ? "pressed" : "released");
+
+               input_event(keypad->input_dev, EV_MSC, MSC_SCAN, scancode);
+               input_report_key(keypad->input_dev, keycode[scancode], pressed);
+               input_sync(keypad->input_dev);
+
+               dev_dbg(&keypad->input_dev->dev,
+                       "report Linux keycode = %d\n", keycode[scancode]);
+       }
+
+       bitmap_copy(keypad->keymap_state, new_state, MTK_KPD_NUM_BITS);
+
+       return IRQ_HANDLED;
+}
+
+static void mt6779_keypad_clk_disable(void *data)
+{
+       clk_disable_unprepare(data);
+}
+
+static int mt6779_keypad_pdrv_probe(struct platform_device *pdev)
+{
+       struct mt6779_keypad *keypad;
+       int irq;
+       u32 debounce;
+       bool wakeup;
+       int error;
+
+       keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad), GFP_KERNEL);
+       if (!keypad)
+               return -ENOMEM;
+
+       keypad->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(keypad->base))
+               return PTR_ERR(keypad->base);
+
+       keypad->regmap = devm_regmap_init_mmio(&pdev->dev, keypad->base,
+                                              &mt6779_keypad_regmap_cfg);
+       if (IS_ERR(keypad->regmap)) {
+               dev_err(&pdev->dev,
+                       "regmap init failed:%pe\n", keypad->regmap);
+               return PTR_ERR(keypad->regmap);
+       }
+
+       bitmap_fill(keypad->keymap_state, MTK_KPD_NUM_BITS);
+
+       keypad->input_dev = devm_input_allocate_device(&pdev->dev);
+       if (!keypad->input_dev) {
+               dev_err(&pdev->dev, "Failed to allocate input dev\n");
+               return -ENOMEM;
+       }
+
+       keypad->input_dev->name = MTK_KPD_NAME;
+       keypad->input_dev->id.bustype = BUS_HOST;
+
+       error = matrix_keypad_parse_properties(&pdev->dev, &keypad->n_rows,
+                                              &keypad->n_cols);
+       if (error) {
+               dev_err(&pdev->dev, "Failed to parse keypad params\n");
+               return error;
+       }
+
+       if (device_property_read_u32(&pdev->dev, "debounce-delay-ms",
+                                    &debounce))
+               debounce = 16;
+
+       if (debounce > MTK_KPD_DEBOUNCE_MAX_MS) {
+               dev_err(&pdev->dev,
+                       "Debounce time exceeds the maximum allowed time %dms\n",
+                       MTK_KPD_DEBOUNCE_MAX_MS);
+               return -EINVAL;
+       }
+
+       wakeup = device_property_read_bool(&pdev->dev, "wakeup-source");
+
+       dev_dbg(&pdev->dev, "n_row=%d n_col=%d debounce=%d\n",
+               keypad->n_rows, keypad->n_cols, debounce);
+
+       error = matrix_keypad_build_keymap(NULL, NULL,
+                                          keypad->n_rows, keypad->n_cols,
+                                          NULL, keypad->input_dev);
+       if (error) {
+               dev_err(&pdev->dev, "Failed to build keymap\n");
+               return error;
+       }
+
+       input_set_capability(keypad->input_dev, EV_MSC, MSC_SCAN);
+
+       regmap_write(keypad->regmap, MTK_KPD_DEBOUNCE,
+                    (debounce * (1 << 5)) & MTK_KPD_DEBOUNCE_MASK);
+
+       keypad->clk = devm_clk_get(&pdev->dev, "kpd");
+       if (IS_ERR(keypad->clk))
+               return PTR_ERR(keypad->clk);
+
+       error = clk_prepare_enable(keypad->clk);
+       if (error) {
+               dev_err(&pdev->dev, "cannot prepare/enable keypad clock\n");
+               return error;
+       }
+
+       error = devm_add_action_or_reset(&pdev->dev, mt6779_keypad_clk_disable,
+                                        keypad->clk);
+       if (error)
+               return error;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
+
+       error = devm_request_threaded_irq(&pdev->dev, irq,
+                                         NULL, mt6779_keypad_irq_handler,
+                                         IRQF_ONESHOT, MTK_KPD_NAME, keypad);
+       if (error) {
+               dev_err(&pdev->dev, "Failed to request IRQ#%d: %d\n",
+                       irq, error);
+               return error;
+       }
+
+       error = input_register_device(keypad->input_dev);
+       if (error) {
+               dev_err(&pdev->dev, "Failed to register device\n");
+               return error;
+       }
+
+       error = device_init_wakeup(&pdev->dev, wakeup);
+       if (error)
+               dev_warn(&pdev->dev, "device_init_wakeup() failed: %d\n",
+                        error);
+
+       return 0;
+}
+
+static const struct of_device_id mt6779_keypad_of_match[] = {
+       { .compatible = "mediatek,mt6779-keypad" },
+       { .compatible = "mediatek,mt6873-keypad" },
+       { /* sentinel */ }
+};
+
+static struct platform_driver mt6779_keypad_pdrv = {
+       .probe = mt6779_keypad_pdrv_probe,
+       .driver = {
+                  .name = MTK_KPD_NAME,
+                  .of_match_table = mt6779_keypad_of_match,
+       },
+};
+module_platform_driver(mt6779_keypad_pdrv);
+
+MODULE_AUTHOR("Mediatek Corporation");
+MODULE_DESCRIPTION("MTK Keypad (KPD) Driver");
+MODULE_LICENSE("GPL");
index 62391d6c7da6f9b4e0f6bc9198d9b5547a68a5df..c31ab4368388fb5309067b832b2bc838eaf78316 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6358/registers.h>
 #include <linux/mfd/mt6397/core.h>
 #include <linux/mfd/mt6397/registers.h>
 #include <linux/module.h>
@@ -74,11 +75,22 @@ static const struct mtk_pmic_regs mt6323_regs = {
        .pmic_rst_reg = MT6323_TOP_RST_MISC,
 };
 
+static const struct mtk_pmic_regs mt6358_regs = {
+       .keys_regs[MTK_PMIC_PWRKEY_INDEX] =
+               MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
+                                  0x2, MT6358_PSC_TOP_INT_CON0, 0x5),
+       .keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
+               MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
+                                  0x8, MT6358_PSC_TOP_INT_CON0, 0xa),
+       .pmic_rst_reg = MT6358_TOP_RST_MISC,
+};
+
 struct mtk_pmic_keys_info {
        struct mtk_pmic_keys *keys;
        const struct mtk_pmic_keys_regs *regs;
        unsigned int keycode;
        int irq;
+       int irq_r; /* optional: release irq if different */
        bool wakeup:1;
 };
 
@@ -188,6 +200,18 @@ static int mtk_pmic_key_setup(struct mtk_pmic_keys *keys,
                return ret;
        }
 
+       if (info->irq_r > 0) {
+               ret = devm_request_threaded_irq(keys->dev, info->irq_r, NULL,
+                                               mtk_pmic_keys_irq_handler_thread,
+                                               IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+                                               "mtk-pmic-keys", info);
+               if (ret) {
+                       dev_err(keys->dev, "Failed to request IRQ_r %d: %d\n",
+                               info->irq_r, ret);
+                       return ret;
+               }
+       }
+
        input_set_capability(keys->input_dev, EV_KEY, info->keycode);
 
        return 0;
@@ -199,8 +223,11 @@ static int __maybe_unused mtk_pmic_keys_suspend(struct device *dev)
        int index;
 
        for (index = 0; index < MTK_PMIC_MAX_KEY_COUNT; index++) {
-               if (keys->keys[index].wakeup)
+               if (keys->keys[index].wakeup) {
                        enable_irq_wake(keys->keys[index].irq);
+                       if (keys->keys[index].irq_r > 0)
+                               enable_irq_wake(keys->keys[index].irq_r);
+               }
        }
 
        return 0;
@@ -212,8 +239,11 @@ static int __maybe_unused mtk_pmic_keys_resume(struct device *dev)
        int index;
 
        for (index = 0; index < MTK_PMIC_MAX_KEY_COUNT; index++) {
-               if (keys->keys[index].wakeup)
+               if (keys->keys[index].wakeup) {
                        disable_irq_wake(keys->keys[index].irq);
+                       if (keys->keys[index].irq_r > 0)
+                               disable_irq_wake(keys->keys[index].irq_r);
+               }
        }
 
        return 0;
@@ -229,6 +259,9 @@ static const struct of_device_id of_mtk_pmic_keys_match_tbl[] = {
        }, {
                .compatible = "mediatek,mt6323-keys",
                .data = &mt6323_regs,
+       }, {
+               .compatible = "mediatek,mt6358-keys",
+               .data = &mt6358_regs,
        }, {
                /* sentinel */
        }
@@ -241,6 +274,8 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
        unsigned int keycount;
        struct mt6397_chip *pmic_chip = dev_get_drvdata(pdev->dev.parent);
        struct device_node *node = pdev->dev.of_node, *child;
+       static const char *const irqnames[] = { "powerkey", "homekey" };
+       static const char *const irqnames_r[] = { "powerkey_r", "homekey_r" };
        struct mtk_pmic_keys *keys;
        const struct mtk_pmic_regs *mtk_pmic_regs;
        struct input_dev *input_dev;
@@ -268,7 +303,8 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
        input_dev->id.version = 0x0001;
 
        keycount = of_get_available_child_count(node);
-       if (keycount > MTK_PMIC_MAX_KEY_COUNT) {
+       if (keycount > MTK_PMIC_MAX_KEY_COUNT ||
+           keycount > ARRAY_SIZE(irqnames)) {
                dev_err(keys->dev, "too many keys defined (%d)\n", keycount);
                return -EINVAL;
        }
@@ -276,12 +312,23 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
        for_each_child_of_node(node, child) {
                keys->keys[index].regs = &mtk_pmic_regs->keys_regs[index];
 
-               keys->keys[index].irq = platform_get_irq(pdev, index);
+               keys->keys[index].irq =
+                       platform_get_irq_byname(pdev, irqnames[index]);
                if (keys->keys[index].irq < 0) {
                        of_node_put(child);
                        return keys->keys[index].irq;
                }
 
+               if (of_device_is_compatible(node, "mediatek,mt6358-keys")) {
+                       keys->keys[index].irq_r = platform_get_irq_byname(pdev,
+                                                                         irqnames_r[index]);
+
+                       if (keys->keys[index].irq_r < 0) {
+                               of_node_put(child);
+                               return keys->keys[index].irq_r;
+                       }
+               }
+
                error = of_property_read_u32(child,
                        "linux,keycodes", &keys->keys[index].keycode);
                if (error) {
index 79851923ee576ab59d28d2f764b30d6b6a4b2f67..b14a389600c9eab7c356d6a85bea78aaf1e46289 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright (C) 2015  Dialog Semiconductor Ltd.
  */
 
+#include <linux/devm-helpers.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/input.h>
@@ -182,13 +183,6 @@ static irqreturn_t da9063_onkey_irq_handler(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static void da9063_cancel_poll(void *data)
-{
-       struct da9063_onkey *onkey = data;
-
-       cancel_delayed_work_sync(&onkey->work);
-}
-
 static int da9063_onkey_probe(struct platform_device *pdev)
 {
        struct da9063_onkey *onkey;
@@ -234,9 +228,8 @@ static int da9063_onkey_probe(struct platform_device *pdev)
 
        input_set_capability(onkey->input, EV_KEY, KEY_POWER);
 
-       INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);
-
-       error = devm_add_action(&pdev->dev, da9063_cancel_poll, onkey);
+       error = devm_delayed_work_autocancel(&pdev->dev, &onkey->work,
+                                            da9063_poll_on);
        if (error) {
                dev_err(&pdev->dev,
                        "Failed to add cancel poll action: %d\n",
index ffad142801b395fda9a850685d9fdb96b5629cd4..434d48ae4b12e8a0fc27658d26ff55e18812497f 100644 (file)
@@ -186,6 +186,7 @@ static const char * const smbus_pnp_ids[] = {
        "LEN2044", /* L470  */
        "LEN2054", /* E480 */
        "LEN2055", /* E580 */
+       "LEN2064", /* T14 Gen 1 AMD / P14s Gen 1 AMD */
        "LEN2068", /* T14 Gen 1 */
        "SYN3052", /* HP EliteBook 840 G4 */
        "SYN3221", /* HP 15-ay000 */
index 8970b49ea09a25053d79b90b79014f3619badc37..9b02dd5dd2b99895c95db0739f56493aa1cfb397 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/of.h>
 #include <linux/jiffies.h>
 #include <linux/delay.h>
+#include <linux/timekeeping.h>
 
 #define DRIVER_NAME            "ps2-gpio"
 
 #define PS2_DATA_BIT7          8
 #define PS2_PARITY_BIT         9
 #define PS2_STOP_BIT           10
-#define PS2_TX_TIMEOUT         11
-#define PS2_ACK_BIT            12
+#define PS2_ACK_BIT            11
 
 #define PS2_DEV_RET_ACK                0xfa
 #define PS2_DEV_RET_NACK       0xfe
 
 #define PS2_CMD_RESEND         0xfe
 
+/*
+ * The PS2 protocol specifies a clock frequency between 10kHz and 16.7kHz,
+ * therefore the maximal interrupt interval should be 100us and the minimum
+ * interrupt interval should be ~60us. Let's allow +/- 20us for frequency
+ * deviations and interrupt latency.
+ *
+ * The data line must be sampled ~30us to 50us after the falling edge,
+ * since the device updates the data line at the rising edge.
+ *
+ * ___            ______            ______            ______            ___
+ *    \          /      \          /      \          /      \          /
+ *     \        /        \        /        \        /        \        /
+ *      \______/          \______/          \______/          \______/
+ *
+ *     |-----------------|                 |--------|
+ *          60us/100us                      30us/50us
+ */
+#define PS2_CLK_FREQ_MIN_HZ            10000
+#define PS2_CLK_FREQ_MAX_HZ            16700
+#define PS2_CLK_MIN_INTERVAL_US                ((1000 * 1000) / PS2_CLK_FREQ_MAX_HZ)
+#define PS2_CLK_MAX_INTERVAL_US                ((1000 * 1000) / PS2_CLK_FREQ_MIN_HZ)
+#define PS2_IRQ_MIN_INTERVAL_US                (PS2_CLK_MIN_INTERVAL_US - 20)
+#define PS2_IRQ_MAX_INTERVAL_US                (PS2_CLK_MAX_INTERVAL_US + 20)
+
 struct ps2_gpio_data {
        struct device *dev;
        struct serio *serio;
@@ -52,19 +76,30 @@ struct ps2_gpio_data {
        struct gpio_desc *gpio_data;
        bool write_enable;
        int irq;
-       unsigned char rx_cnt;
-       unsigned char rx_byte;
-       unsigned char tx_cnt;
-       unsigned char tx_byte;
-       struct completion tx_done;
-       struct mutex tx_mutex;
-       struct delayed_work tx_work;
+       ktime_t t_irq_now;
+       ktime_t t_irq_last;
+       struct {
+               unsigned char cnt;
+               unsigned char byte;
+       } rx;
+       struct {
+               unsigned char cnt;
+               unsigned char byte;
+               ktime_t t_xfer_start;
+               ktime_t t_xfer_end;
+               struct completion complete;
+               struct mutex mutex;
+               struct delayed_work work;
+       } tx;
 };
 
 static int ps2_gpio_open(struct serio *serio)
 {
        struct ps2_gpio_data *drvdata = serio->port_data;
 
+       drvdata->t_irq_last = 0;
+       drvdata->tx.t_xfer_end = 0;
+
        enable_irq(drvdata->irq);
        return 0;
 }
@@ -73,7 +108,7 @@ static void ps2_gpio_close(struct serio *serio)
 {
        struct ps2_gpio_data *drvdata = serio->port_data;
 
-       flush_delayed_work(&drvdata->tx_work);
+       flush_delayed_work(&drvdata->tx.work);
        disable_irq(drvdata->irq);
 }
 
@@ -85,9 +120,9 @@ static int __ps2_gpio_write(struct serio *serio, unsigned char val)
        gpiod_direction_output(drvdata->gpio_clk, 0);
 
        drvdata->mode = PS2_MODE_TX;
-       drvdata->tx_byte = val;
+       drvdata->tx.byte = val;
 
-       schedule_delayed_work(&drvdata->tx_work, usecs_to_jiffies(200));
+       schedule_delayed_work(&drvdata->tx.work, usecs_to_jiffies(200));
 
        return 0;
 }
@@ -98,12 +133,12 @@ static int ps2_gpio_write(struct serio *serio, unsigned char val)
        int ret = 0;
 
        if (in_task()) {
-               mutex_lock(&drvdata->tx_mutex);
+               mutex_lock(&drvdata->tx.mutex);
                __ps2_gpio_write(serio, val);
-               if (!wait_for_completion_timeout(&drvdata->tx_done,
+               if (!wait_for_completion_timeout(&drvdata->tx.complete,
                                                 msecs_to_jiffies(10000)))
                        ret = SERIO_TIMEOUT;
-               mutex_unlock(&drvdata->tx_mutex);
+               mutex_unlock(&drvdata->tx.mutex);
        } else {
                __ps2_gpio_write(serio, val);
        }
@@ -115,9 +150,10 @@ static void ps2_gpio_tx_work_fn(struct work_struct *work)
 {
        struct delayed_work *dwork = to_delayed_work(work);
        struct ps2_gpio_data *drvdata = container_of(dwork,
-                                                   struct ps2_gpio_data,
-                                                   tx_work);
+                                                    struct ps2_gpio_data,
+                                                    tx.work);
 
+       drvdata->tx.t_xfer_start = ktime_get();
        enable_irq(drvdata->irq);
        gpiod_direction_output(drvdata->gpio_data, 0);
        gpiod_direction_input(drvdata->gpio_clk);
@@ -128,20 +164,31 @@ static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata)
        unsigned char byte, cnt;
        int data;
        int rxflags = 0;
-       static unsigned long old_jiffies;
+       s64 us_delta;
 
-       byte = drvdata->rx_byte;
-       cnt = drvdata->rx_cnt;
+       byte = drvdata->rx.byte;
+       cnt = drvdata->rx.cnt;
 
-       if (old_jiffies == 0)
-               old_jiffies = jiffies;
+       drvdata->t_irq_now = ktime_get();
+
+       /*
+        * We need to consider spurious interrupts happening right after
+        * a TX xfer finished.
+        */
+       us_delta = ktime_us_delta(drvdata->t_irq_now, drvdata->tx.t_xfer_end);
+       if (unlikely(us_delta < PS2_IRQ_MIN_INTERVAL_US))
+               goto end;
 
-       if ((jiffies - old_jiffies) > usecs_to_jiffies(100)) {
+       us_delta = ktime_us_delta(drvdata->t_irq_now, drvdata->t_irq_last);
+       if (us_delta > PS2_IRQ_MAX_INTERVAL_US && cnt) {
                dev_err(drvdata->dev,
                        "RX: timeout, probably we missed an interrupt\n");
                goto err;
+       } else if (unlikely(us_delta < PS2_IRQ_MIN_INTERVAL_US)) {
+               /* Ignore spurious IRQs. */
+               goto end;
        }
-       old_jiffies = jiffies;
+       drvdata->t_irq_last = drvdata->t_irq_now;
 
        data = gpiod_get_value(drvdata->gpio_data);
        if (unlikely(data < 0)) {
@@ -178,8 +225,16 @@ static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata)
                        if (!drvdata->write_enable)
                                goto err;
                }
+               break;
+       case PS2_STOP_BIT:
+               /* stop bit should be high */
+               if (unlikely(!data)) {
+                       dev_err(drvdata->dev, "RX: stop bit should be high\n");
+                       goto err;
+               }
 
-               /* Do not send spurious ACK's and NACK's when write fn is
+               /*
+        * Do not send spurious ACKs and NACKs when write fn is
                 * not provided.
                 */
                if (!drvdata->write_enable) {
@@ -189,23 +244,11 @@ static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata)
                                break;
                }
 
-               /* Let's send the data without waiting for the stop bit to be
-                * sent. It may happen that we miss the stop bit. When this
-                * happens we have no way to recover from this, certainly
-                * missing the parity bit would be recognized when processing
-                * the stop bit. When missing both, data is lost.
-                */
                serio_interrupt(drvdata->serio, byte, rxflags);
                dev_dbg(drvdata->dev, "RX: sending byte 0x%x\n", byte);
-               break;
-       case PS2_STOP_BIT:
-               /* stop bit should be high */
-               if (unlikely(!data)) {
-                       dev_err(drvdata->dev, "RX: stop bit should be high\n");
-                       goto err;
-               }
+
                cnt = byte = 0;
-               old_jiffies = 0;
+
                goto end; /* success */
        default:
                dev_err(drvdata->dev, "RX: got out of sync with the device\n");
@@ -217,11 +260,10 @@ static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata)
 
 err:
        cnt = byte = 0;
-       old_jiffies = 0;
        __ps2_gpio_write(drvdata->serio, PS2_CMD_RESEND);
 end:
-       drvdata->rx_cnt = cnt;
-       drvdata->rx_byte = byte;
+       drvdata->rx.cnt = cnt;
+       drvdata->rx.byte = byte;
        return IRQ_HANDLED;
 }
 
@@ -229,20 +271,34 @@ static irqreturn_t ps2_gpio_irq_tx(struct ps2_gpio_data *drvdata)
 {
        unsigned char byte, cnt;
        int data;
-       static unsigned long old_jiffies;
+       s64 us_delta;
+
+       cnt = drvdata->tx.cnt;
+       byte = drvdata->tx.byte;
 
-       cnt = drvdata->tx_cnt;
-       byte = drvdata->tx_byte;
+       drvdata->t_irq_now = ktime_get();
 
-       if (old_jiffies == 0)
-               old_jiffies = jiffies;
+       /*
+        * There might be pending IRQs since we disabled IRQs in
+        * __ps2_gpio_write().  We can expect at least one clock period until
+        * the device generates the first falling edge after releasing the
+        * clock line.
+        */
+       us_delta = ktime_us_delta(drvdata->t_irq_now,
+                                 drvdata->tx.t_xfer_start);
+       if (unlikely(us_delta < PS2_CLK_MIN_INTERVAL_US))
+               goto end;
 
-       if ((jiffies - old_jiffies) > usecs_to_jiffies(100)) {
+       us_delta = ktime_us_delta(drvdata->t_irq_now, drvdata->t_irq_last);
+       if (us_delta > PS2_IRQ_MAX_INTERVAL_US && cnt > 1) {
                dev_err(drvdata->dev,
                        "TX: timeout, probably we missed an interrupt\n");
                goto err;
+       } else if (unlikely(us_delta < PS2_IRQ_MIN_INTERVAL_US)) {
+               /* Ignore spurious IRQs. */
+               goto end;
        }
-       old_jiffies = jiffies;
+       drvdata->t_irq_last = drvdata->t_irq_now;
 
        switch (cnt) {
        case PS2_START_BIT:
@@ -270,27 +326,22 @@ static irqreturn_t ps2_gpio_irq_tx(struct ps2_gpio_data *drvdata)
                /* release data line to generate stop bit */
                gpiod_direction_input(drvdata->gpio_data);
                break;
-       case PS2_TX_TIMEOUT:
-               /* Devices generate one extra clock pulse before sending the
-                * acknowledgment.
-                */
-               break;
        case PS2_ACK_BIT:
-               gpiod_direction_input(drvdata->gpio_data);
                data = gpiod_get_value(drvdata->gpio_data);
                if (data) {
                        dev_warn(drvdata->dev, "TX: received NACK, retry\n");
                        goto err;
                }
 
+               drvdata->tx.t_xfer_end = ktime_get();
                drvdata->mode = PS2_MODE_RX;
-               complete(&drvdata->tx_done);
+               complete(&drvdata->tx.complete);
 
                cnt = 1;
-               old_jiffies = 0;
                goto end; /* success */
        default:
-               /* Probably we missed the stop bit. Therefore we release data
+               /*
+                * Probably we missed the stop bit. Therefore we release data
                 * line and try again.
                 */
                gpiod_direction_input(drvdata->gpio_data);
@@ -303,11 +354,10 @@ static irqreturn_t ps2_gpio_irq_tx(struct ps2_gpio_data *drvdata)
 
 err:
        cnt = 1;
-       old_jiffies = 0;
        gpiod_direction_input(drvdata->gpio_data);
-       __ps2_gpio_write(drvdata->serio, drvdata->tx_byte);
+       __ps2_gpio_write(drvdata->serio, drvdata->tx.byte);
 end:
-       drvdata->tx_cnt = cnt;
+       drvdata->tx.cnt = cnt;
        return IRQ_HANDLED;
 }
 
@@ -322,14 +372,19 @@ static irqreturn_t ps2_gpio_irq(int irq, void *dev_id)
 static int ps2_gpio_get_props(struct device *dev,
                                 struct ps2_gpio_data *drvdata)
 {
-       drvdata->gpio_data = devm_gpiod_get(dev, "data", GPIOD_IN);
+       enum gpiod_flags gflags;
+
+       /* Enforce open drain, since this is required by the PS/2 bus. */
+       gflags = GPIOD_IN | GPIOD_FLAGS_BIT_OPEN_DRAIN;
+
+       drvdata->gpio_data = devm_gpiod_get(dev, "data", gflags);
        if (IS_ERR(drvdata->gpio_data)) {
                dev_err(dev, "failed to request data gpio: %ld",
                        PTR_ERR(drvdata->gpio_data));
                return PTR_ERR(drvdata->gpio_data);
        }
 
-       drvdata->gpio_clk = devm_gpiod_get(dev, "clk", GPIOD_IN);
+       drvdata->gpio_clk = devm_gpiod_get(dev, "clk", gflags);
        if (IS_ERR(drvdata->gpio_clk)) {
                dev_err(dev, "failed to request clock gpio: %ld",
                        PTR_ERR(drvdata->gpio_clk));
@@ -387,7 +442,8 @@ static int ps2_gpio_probe(struct platform_device *pdev)
        serio->id.type = SERIO_8042;
        serio->open = ps2_gpio_open;
        serio->close = ps2_gpio_close;
-       /* Write can be enabled in platform/dt data, but possibly it will not
+       /*
+        * Write can be enabled in platform/dt data, but possibly it will not
         * work because of the tough timings.
         */
        serio->write = drvdata->write_enable ? ps2_gpio_write : NULL;
@@ -400,14 +456,15 @@ static int ps2_gpio_probe(struct platform_device *pdev)
        drvdata->dev = dev;
        drvdata->mode = PS2_MODE_RX;
 
-       /* Tx count always starts at 1, as the start bit is sent implicitly by
+       /*
+        * Tx count always starts at 1, as the start bit is sent implicitly by
         * host-to-device communication initialization.
         */
-       drvdata->tx_cnt = 1;
+       drvdata->tx.cnt = 1;
 
-       INIT_DELAYED_WORK(&drvdata->tx_work, ps2_gpio_tx_work_fn);
-       init_completion(&drvdata->tx_done);
-       mutex_init(&drvdata->tx_mutex);
+       INIT_DELAYED_WORK(&drvdata->tx.work, ps2_gpio_tx_work_fn);
+       init_completion(&drvdata->tx.complete);
+       mutex_init(&drvdata->tx.mutex);
 
        serio_register_port(serio);
        platform_set_drvdata(pdev, drvdata);
index ff7794cecf69cd9b6da79965e42ca8c829c81209..43c7d6e5bdc07ef992398dfae2ecb6bae09622df 100644 (file)
@@ -638,6 +638,16 @@ config TOUCHSCREEN_MTOUCH
          To compile this driver as a module, choose M here: the
          module will be called mtouch.
 
+config TOUCHSCREEN_IMAGIS
+       tristate "Imagis touchscreen support"
+       depends on I2C
+       help
+         Say Y here if you have an Imagis IST30xxC touchscreen.
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called imagis.
+
 config TOUCHSCREEN_IMX6UL_TSC
        tristate "Freescale i.MX6UL touchscreen controller"
        depends on ((OF && GPIOLIB) || COMPILE_TEST) && HAS_IOMEM
index 39a8127cf6a55fcf27e145db77007620ceb47072..557f84fd20755f6c7ef213b847c4fb2d36d37c60 100644 (file)
@@ -49,6 +49,7 @@ obj-$(CONFIG_TOUCHSCREEN_GOODIX)      += goodix_ts.o
 obj-$(CONFIG_TOUCHSCREEN_HIDEEP)       += hideep.o
 obj-$(CONFIG_TOUCHSCREEN_ILI210X)      += ili210x.o
 obj-$(CONFIG_TOUCHSCREEN_ILITEK)       += ilitek_ts_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_IMAGIS)       += imagis.o
 obj-$(CONFIG_TOUCHSCREEN_IMX6UL_TSC)   += imx6ul_tsc.o
 obj-$(CONFIG_TOUCHSCREEN_INEXIO)       += inexio.o
 obj-$(CONFIG_TOUCHSCREEN_IPROC)                += bcm_iproc_tsc.o
index 752e8ba4fecb1c7c1c4dc3983d941fb47c606bd2..3ad9870db1081f825e1a5076c60121b46410c70c 100644 (file)
@@ -298,32 +298,17 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
        return -ENOMSG;
 }
 
-static struct input_dev *goodix_create_pen_input(struct goodix_ts_data *ts)
+static int goodix_create_pen_input(struct goodix_ts_data *ts)
 {
        struct device *dev = &ts->client->dev;
        struct input_dev *input;
 
        input = devm_input_allocate_device(dev);
        if (!input)
-               return NULL;
-
-       input_alloc_absinfo(input);
-       if (!input->absinfo) {
-               input_free_device(input);
-               return NULL;
-       }
-
-       input->absinfo[ABS_X] = ts->input_dev->absinfo[ABS_MT_POSITION_X];
-       input->absinfo[ABS_Y] = ts->input_dev->absinfo[ABS_MT_POSITION_Y];
-       __set_bit(ABS_X, input->absbit);
-       __set_bit(ABS_Y, input->absbit);
-       input_set_abs_params(input, ABS_PRESSURE, 0, 255, 0, 0);
+               return -ENOMEM;
 
-       input_set_capability(input, EV_KEY, BTN_TOUCH);
-       input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
-       input_set_capability(input, EV_KEY, BTN_STYLUS);
-       input_set_capability(input, EV_KEY, BTN_STYLUS2);
-       __set_bit(INPUT_PROP_DIRECT, input->propbit);
+       input_copy_abs(input, ABS_X, ts->input_dev, ABS_MT_POSITION_X);
+       input_copy_abs(input, ABS_Y, ts->input_dev, ABS_MT_POSITION_Y);
        /*
         * The resolution of these touchscreens is about 10 units/mm, the actual
         * resolution does not matter much since we set INPUT_PROP_DIRECT.
@@ -331,6 +316,13 @@ static struct input_dev *goodix_create_pen_input(struct goodix_ts_data *ts)
         */
        input_abs_set_res(input, ABS_X, 10);
        input_abs_set_res(input, ABS_Y, 10);
+       input_set_abs_params(input, ABS_PRESSURE, 0, 255, 0, 0);
+
+       input_set_capability(input, EV_KEY, BTN_TOUCH);
+       input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
+       input_set_capability(input, EV_KEY, BTN_STYLUS);
+       input_set_capability(input, EV_KEY, BTN_STYLUS2);
+       __set_bit(INPUT_PROP_DIRECT, input->propbit);
 
        input->name = "Goodix Active Pen";
        input->phys = "input/pen";
@@ -340,25 +332,23 @@ static struct input_dev *goodix_create_pen_input(struct goodix_ts_data *ts)
                input->id.product = 0x1001;
        input->id.version = ts->version;
 
-       if (input_register_device(input) != 0) {
-               input_free_device(input);
-               return NULL;
-       }
-
-       return input;
+       ts->input_pen = input;
+       return 0;
 }
 
 static void goodix_ts_report_pen_down(struct goodix_ts_data *ts, u8 *data)
 {
-       int input_x, input_y, input_w;
+       int input_x, input_y, input_w, error;
        u8 key_value;
 
-       if (!ts->input_pen) {
-               ts->input_pen = goodix_create_pen_input(ts);
-               if (!ts->input_pen)
-                       return;
+       if (!ts->pen_input_registered) {
+               error = input_register_device(ts->input_pen);
+               ts->pen_input_registered = (error == 0) ? 1 : error;
        }
 
+       if (ts->pen_input_registered < 0)
+               return;
+
        if (ts->contact_size == 9) {
                input_x = get_unaligned_le16(&data[4]);
                input_y = get_unaligned_le16(&data[6]);
@@ -1215,6 +1205,17 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
                return error;
        }
 
+       /*
+        * Create the input_pen device before goodix_request_irq() calls
+        * devm_request_threaded_irq() so that the devm framework frees
+        * it after disabling the irq.
+        * Unfortunately there is no way to detect if the touchscreen has pen
+        * support, so registering the dev is delayed till the first pen event.
+        */
+       error = goodix_create_pen_input(ts);
+       if (error)
+               return error;
+
        ts->irq_flags = goodix_irq_flags[ts->int_trigger_type] | IRQF_ONESHOT;
        error = goodix_request_irq(ts);
        if (error) {
index fa8602e78a6480c066103214cb06e475b36de6eb..87797cc88b3243cb677d283502b32e0dee885639 100644 (file)
@@ -94,6 +94,7 @@ struct goodix_ts_data {
        u16 version;
        bool reset_controller_at_probe;
        bool load_cfg_from_disk;
+       int pen_input_registered;
        struct completion firmware_loading_complete;
        unsigned long irq_flags;
        enum goodix_irq_pin_access_method irq_pin_access_method;
diff --git a/drivers/input/touchscreen/imagis.c b/drivers/input/touchscreen/imagis.c
new file mode 100644 (file)
index 0000000..e2697e6
--- /dev/null
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#define IST3038C_HIB_ACCESS            (0x800B << 16)
+#define IST3038C_DIRECT_ACCESS         BIT(31)
+#define IST3038C_REG_CHIPID            0x40001000
+#define IST3038C_REG_HIB_BASE          0x30000100
+#define IST3038C_REG_TOUCH_STATUS      (IST3038C_REG_HIB_BASE | IST3038C_HIB_ACCESS)
+#define IST3038C_REG_TOUCH_COORD       (IST3038C_REG_HIB_BASE | IST3038C_HIB_ACCESS | 0x8)
+#define IST3038C_REG_INTR_MESSAGE      (IST3038C_REG_HIB_BASE | IST3038C_HIB_ACCESS | 0x4)
+#define IST3038C_WHOAMI                        0x38c
+#define IST3038C_CHIP_ON_DELAY_MS      60
+#define IST3038C_I2C_RETRY_COUNT       3
+#define IST3038C_MAX_FINGER_NUM                10
+#define IST3038C_X_MASK                        GENMASK(23, 12)
+#define IST3038C_X_SHIFT               12
+#define IST3038C_Y_MASK                        GENMASK(11, 0)
+#define IST3038C_AREA_MASK             GENMASK(27, 24)
+#define IST3038C_AREA_SHIFT            24
+#define IST3038C_FINGER_COUNT_MASK     GENMASK(15, 12)
+#define IST3038C_FINGER_COUNT_SHIFT    12
+#define IST3038C_FINGER_STATUS_MASK    GENMASK(9, 0)
+
+struct imagis_ts {
+       struct i2c_client *client;
+       struct input_dev *input_dev;
+       struct touchscreen_properties prop;
+       struct regulator_bulk_data supplies[2];
+};
+
+static int imagis_i2c_read_reg(struct imagis_ts *ts,
+                              unsigned int reg, u32 *data)
+{
+       __be32 ret_be;
+       __be32 reg_be = cpu_to_be32(reg);
+       struct i2c_msg msg[] = {
+               {
+                       .addr = ts->client->addr,
+                       .flags = 0,
+                       .buf = (unsigned char *)&reg_be,
+                       .len = sizeof(reg_be),
+               }, {
+                       .addr = ts->client->addr,
+                       .flags = I2C_M_RD,
+                       .buf = (unsigned char *)&ret_be,
+                       .len = sizeof(ret_be),
+               },
+       };
+       int ret, error;
+       int retry = IST3038C_I2C_RETRY_COUNT;
+
+       /* Retry in case the controller fails to respond */
+       do {
+               ret = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+               if (ret == ARRAY_SIZE(msg)) {
+                       *data = be32_to_cpu(ret_be);
+                       return 0;
+               }
+
+               error = ret < 0 ? ret : -EIO;
+               dev_err(&ts->client->dev,
+                       "%s - i2c_transfer failed: %d (%d)\n",
+                       __func__, error, ret);
+       } while (--retry);
+
+       return error;
+}
+
+static irqreturn_t imagis_interrupt(int irq, void *dev_id)
+{
+       struct imagis_ts *ts = dev_id;
+       u32 intr_message, finger_status;
+       unsigned int finger_count, finger_pressed;
+       int i;
+       int error;
+
+       error = imagis_i2c_read_reg(ts, IST3038C_REG_INTR_MESSAGE,
+                                   &intr_message);
+       if (error) {
+               dev_err(&ts->client->dev,
+                       "failed to read the interrupt message: %d\n", error);
+               goto out;
+       }
+
+       finger_count = (intr_message & IST3038C_FINGER_COUNT_MASK) >>
+                               IST3038C_FINGER_COUNT_SHIFT;
+       if (finger_count > IST3038C_MAX_FINGER_NUM) {
+               dev_err(&ts->client->dev,
+                       "finger count %d is more than maximum supported\n",
+                       finger_count);
+               goto out;
+       }
+
+       finger_pressed = intr_message & IST3038C_FINGER_STATUS_MASK;
+
+       for (i = 0; i < finger_count; i++) {
+               error = imagis_i2c_read_reg(ts,
+                                           IST3038C_REG_TOUCH_COORD + (i * 4),
+                                           &finger_status);
+               if (error) {
+                       dev_err(&ts->client->dev,
+                               "failed to read coordinates for finger %d: %d\n",
+                               i, error);
+                       goto out;
+               }
+
+               input_mt_slot(ts->input_dev, i);
+               input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER,
+                                          finger_pressed & BIT(i));
+               touchscreen_report_pos(ts->input_dev, &ts->prop,
+                                      (finger_status & IST3038C_X_MASK) >>
+                                               IST3038C_X_SHIFT,
+                                      finger_status & IST3038C_Y_MASK, 1);
+               input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR,
+                                (finger_status & IST3038C_AREA_MASK) >>
+                                       IST3038C_AREA_SHIFT);
+       }
+
+       input_mt_sync_frame(ts->input_dev);
+       input_sync(ts->input_dev);
+
+out:
+       return IRQ_HANDLED;
+}
+
+static void imagis_power_off(void *_ts)
+{
+       struct imagis_ts *ts = _ts;
+
+       regulator_bulk_disable(ARRAY_SIZE(ts->supplies), ts->supplies);
+}
+
+static int imagis_power_on(struct imagis_ts *ts)
+{
+       int error;
+
+       error = regulator_bulk_enable(ARRAY_SIZE(ts->supplies), ts->supplies);
+       if (error)
+               return error;
+
+       msleep(IST3038C_CHIP_ON_DELAY_MS);
+
+       return 0;
+}
+
+static int imagis_start(struct imagis_ts *ts)
+{
+       int error;
+
+       error = imagis_power_on(ts);
+       if (error)
+               return error;
+
+       enable_irq(ts->client->irq);
+
+       return 0;
+}
+
+static int imagis_stop(struct imagis_ts *ts)
+{
+       disable_irq(ts->client->irq);
+
+       imagis_power_off(ts);
+
+       return 0;
+}
+
+static int imagis_input_open(struct input_dev *dev)
+{
+       struct imagis_ts *ts = input_get_drvdata(dev);
+
+       return imagis_start(ts);
+}
+
+static void imagis_input_close(struct input_dev *dev)
+{
+       struct imagis_ts *ts = input_get_drvdata(dev);
+
+       imagis_stop(ts);
+}
+
+static int imagis_init_input_dev(struct imagis_ts *ts)
+{
+       struct input_dev *input_dev;
+       int error;
+
+       input_dev = devm_input_allocate_device(&ts->client->dev);
+       if (!input_dev)
+               return -ENOMEM;
+
+       ts->input_dev = input_dev;
+
+       input_dev->name = "Imagis capacitive touchscreen";
+       input_dev->phys = "input/ts";
+       input_dev->id.bustype = BUS_I2C;
+       input_dev->open = imagis_input_open;
+       input_dev->close = imagis_input_close;
+
+       input_set_drvdata(input_dev, ts);
+
+       input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
+       input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
+       input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+
+       touchscreen_parse_properties(input_dev, true, &ts->prop);
+       if (!ts->prop.max_x || !ts->prop.max_y) {
+               dev_err(&ts->client->dev,
+                       "Touchscreen-size-x and/or touchscreen-size-y not set in dts\n");
+               return -EINVAL;
+       }
+
+       error = input_mt_init_slots(input_dev,
+                                   IST3038C_MAX_FINGER_NUM,
+                                   INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+       if (error) {
+               dev_err(&ts->client->dev,
+                       "Failed to initialize MT slots: %d", error);
+               return error;
+       }
+
+       error = input_register_device(input_dev);
+       if (error) {
+               dev_err(&ts->client->dev,
+                       "Failed to register input device: %d", error);
+               return error;
+       }
+
+       return 0;
+}
+
+static int imagis_init_regulators(struct imagis_ts *ts)
+{
+       struct i2c_client *client = ts->client;
+
+       ts->supplies[0].supply = "vdd";
+       ts->supplies[1].supply = "vddio";
+       return devm_regulator_bulk_get(&client->dev,
+                                      ARRAY_SIZE(ts->supplies),
+                                      ts->supplies);
+}
+
+static int imagis_probe(struct i2c_client *i2c)
+{
+       struct device *dev = &i2c->dev;
+       struct imagis_ts *ts;
+       u32 chip_id;
+       int error;
+
+       ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
+       if (!ts)
+               return -ENOMEM;
+
+       ts->client = i2c;
+
+       error = imagis_init_regulators(ts);
+       if (error) {
+               dev_err(dev, "regulator init error: %d\n", error);
+               return error;
+       }
+
+       error = imagis_power_on(ts);
+       if (error) {
+               dev_err(dev, "failed to enable regulators: %d\n", error);
+               return error;
+       }
+
+       error = devm_add_action_or_reset(dev, imagis_power_off, ts);
+       if (error) {
+               dev_err(dev, "failed to install poweroff action: %d\n", error);
+               return error;
+       }
+
+       error = imagis_i2c_read_reg(ts,
+                       IST3038C_REG_CHIPID | IST3038C_DIRECT_ACCESS,
+                       &chip_id);
+       if (error) {
+               dev_err(dev, "chip ID read failure: %d\n", error);
+               return error;
+       }
+
+       if (chip_id != IST3038C_WHOAMI) {
+               dev_err(dev, "unknown chip ID: 0x%x\n", chip_id);
+               return -EINVAL;
+       }
+
+       error = devm_request_threaded_irq(dev, i2c->irq,
+                                         NULL, imagis_interrupt,
+                                         IRQF_ONESHOT | IRQF_NO_AUTOEN,
+                                         "imagis-touchscreen", ts);
+       if (error) {
+               dev_err(dev, "IRQ %d allocation failure: %d\n",
+                       i2c->irq, error);
+               return error;
+       }
+
+       error = imagis_init_input_dev(ts);
+       if (error)
+               return error;
+
+       return 0;
+}
+
+static int __maybe_unused imagis_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct imagis_ts *ts = i2c_get_clientdata(client);
+       int retval = 0;
+
+       mutex_lock(&ts->input_dev->mutex);
+
+       if (input_device_enabled(ts->input_dev))
+               retval = imagis_stop(ts);
+
+       mutex_unlock(&ts->input_dev->mutex);
+
+       return retval;
+}
+
+static int __maybe_unused imagis_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct imagis_ts *ts = i2c_get_clientdata(client);
+       int retval = 0;
+
+       mutex_lock(&ts->input_dev->mutex);
+
+       if (input_device_enabled(ts->input_dev))
+               retval = imagis_start(ts);
+
+       mutex_unlock(&ts->input_dev->mutex);
+
+       return retval;
+}
+
+static SIMPLE_DEV_PM_OPS(imagis_pm_ops, imagis_suspend, imagis_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id imagis_of_match[] = {
+       { .compatible = "imagis,ist3038c", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, imagis_of_match);
+#endif
+
+static struct i2c_driver imagis_ts_driver = {
+       .driver = {
+               .name = "imagis-touchscreen",
+               .pm = &imagis_pm_ops,
+               .of_match_table = of_match_ptr(imagis_of_match),
+       },
+       .probe_new = imagis_probe,
+};
+
+module_i2c_driver(imagis_ts_driver);
+
+MODULE_DESCRIPTION("Imagis IST3038C Touchscreen Driver");
+MODULE_AUTHOR("Markuss Broks <markuss.broks@gmail.com>");
+MODULE_LICENSE("GPL");
index b3fa71213d603421d040b2a1503482d05db4f8cd..34c4cca57d133b717325b9ab68f7e8045fe882a7 100644 (file)
@@ -486,11 +486,11 @@ static int iqs5xx_axis_init(struct i2c_client *client)
 {
        struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
        struct touchscreen_properties *prop = &iqs5xx->prop;
-       struct input_dev *input;
+       struct input_dev *input = iqs5xx->input;
        u16 max_x, max_y;
        int error;
 
-       if (!iqs5xx->input) {
+       if (!input) {
                input = devm_input_allocate_device(&client->dev);
                if (!input)
                        return -ENOMEM;
@@ -512,11 +512,11 @@ static int iqs5xx_axis_init(struct i2c_client *client)
        if (error)
                return error;
 
-       input_set_abs_params(iqs5xx->input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
-       input_set_abs_params(iqs5xx->input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
-       input_set_abs_params(iqs5xx->input, ABS_MT_PRESSURE, 0, U16_MAX, 0, 0);
+       input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
+       input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
+       input_set_abs_params(input, ABS_MT_PRESSURE, 0, U16_MAX, 0, 0);
 
-       touchscreen_parse_properties(iqs5xx->input, true, prop);
+       touchscreen_parse_properties(input, true, prop);
 
        /*
         * The device reserves 0xFFFF for coordinates that correspond to slots
@@ -540,7 +540,7 @@ static int iqs5xx_axis_init(struct i2c_client *client)
                        return error;
        }
 
-       error = input_mt_init_slots(iqs5xx->input, IQS5XX_NUM_CONTACTS,
+       error = input_mt_init_slots(input, IQS5XX_NUM_CONTACTS,
                                    INPUT_MT_DIRECT);
        if (error)
                dev_err(&client->dev, "Failed to initialize slots: %d\n",
@@ -674,7 +674,7 @@ static irqreturn_t iqs5xx_irq(int irq, void *data)
                input_mt_slot(input, i);
                if (input_mt_report_slot_state(input, MT_TOOL_FINGER,
                                               pressure != 0)) {
-                       touchscreen_report_pos(iqs5xx->input, &iqs5xx->prop,
+                       touchscreen_report_pos(input, &iqs5xx->prop,
                                               be16_to_cpu(touch_data->abs_x),
                                               be16_to_cpu(touch_data->abs_y),
                                               true);
index bc11203c9cf785f5afcdf070ed5314185573a1cd..72e0b767e1ba4c87c31b5346c45746f82ff9773b 100644 (file)
@@ -339,11 +339,11 @@ static int stmfts_input_open(struct input_dev *dev)
 
        err = pm_runtime_get_sync(&sdata->client->dev);
        if (err < 0)
-               return err;
+               goto out;
 
        err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON);
        if (err)
-               return err;
+               goto out;
 
        mutex_lock(&sdata->mutex);
        sdata->running = true;
@@ -366,7 +366,9 @@ static int stmfts_input_open(struct input_dev *dev)
                                 "failed to enable touchkey\n");
        }
 
-       return 0;
+out:
+       pm_runtime_put_noidle(&sdata->client->dev);
+       return err;
 }
 
 static void stmfts_input_close(struct input_dev *dev)
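
The stmfts fix above relies on pm_runtime_get_sync() raising the device usage count even when it returns an error, so every exit from the open handler, success or failure, must drop that reference; pm_runtime_put_noidle() does so without forcing a suspend. The resulting shape, in outline:

        err = pm_runtime_get_sync(dev);  /* count is raised even on failure */
        if (err < 0)
                goto out;

        /* ... setup steps that may fail and also jump to out ... */
        err = 0;
out:
        pm_runtime_put_noidle(dev);      /* rebalance on every path */
        return err;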
index 27810f6c69f67900d7419083e6f2e30cca716c67..72c7258b93a54612b5dbc94ca4d4fcd200fae571 100644 (file)
@@ -88,6 +88,8 @@ struct tsc200x {
        int                     in_z1;
        int                     in_z2;
 
+       struct touchscreen_properties prop;
+
        spinlock_t              lock;
        struct timer_list       penup_timer;
 
@@ -113,8 +115,7 @@ static void tsc200x_update_pen_state(struct tsc200x *ts,
                                     int x, int y, int pressure)
 {
        if (pressure) {
-               input_report_abs(ts->idev, ABS_X, x);
-               input_report_abs(ts->idev, ABS_Y, y);
+               touchscreen_report_pos(ts->idev, &ts->prop, x, y, false);
                input_report_abs(ts->idev, ABS_PRESSURE, pressure);
                if (!ts->pen_down) {
                        input_report_key(ts->idev, BTN_TOUCH, !!pressure);
@@ -533,7 +534,7 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
        input_set_abs_params(input_dev, ABS_PRESSURE,
                             0, MAX_12BIT, TSC200X_DEF_P_FUZZ, 0);
 
-       touchscreen_parse_properties(input_dev, false, NULL);
+       touchscreen_parse_properties(input_dev, false, &ts->prop);
 
        /* Ensure the touchscreen is off */
        tsc200x_stop_scan(ts);
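
Routing reports through touchscreen_report_pos() with a populated touchscreen_properties is what lets the input core apply the generic devicetree transforms (touchscreen-inverted-x/y, touchscreen-swapped-x-y); previously tsc200x parsed those properties but discarded them by passing NULL. The pattern, in outline (raw_x/raw_y stand for the ADC values):

        /* once, at probe */
        touchscreen_parse_properties(input_dev, false, &ts->prop);

        /* per sample: swap/invert per DT, then emit ABS_X/ABS_Y */
        touchscreen_report_pos(ts->idev, &ts->prop, raw_x, raw_y, false);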
diff --git a/drivers/input/vivaldi-fmap.c b/drivers/input/vivaldi-fmap.c
new file mode 100644 (file)
index 0000000..6dae83d
--- /dev/null
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helpers for ChromeOS Vivaldi keyboard function row mapping
+ *
+ * Copyright (C) 2022 Google, Inc
+ */
+
+#include <linux/export.h>
+#include <linux/input/vivaldi-fmap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+/**
+ * vivaldi_function_row_physmap_show - Print vivaldi function row physmap attribute
+ * @data: The vivaldi function row map
+ * @buf: Buffer to print the function row physmap to
+ */
+ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data,
+                                         char *buf)
+{
+       ssize_t size = 0;
+       int i;
+       const u32 *physmap = data->function_row_physmap;
+
+       if (!data->num_function_row_keys)
+               return 0;
+
+       for (i = 0; i < data->num_function_row_keys; i++)
+               size += scnprintf(buf + size, PAGE_SIZE - size,
+                                 "%s%02X", size ? " " : "", physmap[i]);
+       if (size)
+               size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+
+       return size;
+}
+EXPORT_SYMBOL_GPL(vivaldi_function_row_physmap_show);
+
+MODULE_LICENSE("GPL");
index 4aab631ef51738a802fff2f70d610c1693cc1f7c..d9cf2820c02eae1ad3b1c0db47a36074671c7f4c 100644 (file)
@@ -1661,7 +1661,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
        num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
                                                     sizeof(phandle));
        if (num_iommus < 0)
-               return 0;
+               return ERR_PTR(-ENODEV);
 
        arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
        if (!arch_data)
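
The omap-iommu one-liner matters because probe_device() returns a pointer and the IOMMU core discriminates outcomes with the ERR_PTR convention; returning 0 (a NULL pointer) is neither a valid iommu_device nor a decodable error. Roughly how a caller is expected to discriminate (sketch):

        struct iommu_device *iommu = ops->probe_device(dev);

        if (IS_ERR(iommu))
                return PTR_ERR(iommu);  /* -ENODEV: not translated by this IOMMU */

        /* valid pointer: proceed to attach */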
index 680d2fcf2686386bfbee14e6e7eb94e13303070f..15edb9a6fcae0f59024fd38daa9eed827862f017 100644 (file)
@@ -433,6 +433,7 @@ config QCOM_PDC
 config QCOM_MPM
        tristate "QCOM MPM"
        depends on ARCH_QCOM
+       depends on MAILBOX
        select IRQ_DOMAIN_HIERARCHY
        help
          MSM Power Manager driver to manage and configure wakeup
index cd772973114afab89e9afe734fa91053eaa2121b..a0fc764ec9dc6462478b747484175bae76964d07 100644 (file)
@@ -3011,18 +3011,12 @@ static int __init allocate_lpi_tables(void)
        return 0;
 }
 
-static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
+static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
 {
        u32 count = 1000000;    /* 1s! */
        bool clean;
        u64 val;
 
-       val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
-       val &= ~GICR_VPENDBASER_Valid;
-       val &= ~clr;
-       val |= set;
-       gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
-
        do {
                val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
                clean = !(val & GICR_VPENDBASER_Dirty);
@@ -3033,10 +3027,26 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
                }
        } while (!clean && count);
 
-       if (unlikely(val & GICR_VPENDBASER_Dirty)) {
+       if (unlikely(!clean))
                pr_err_ratelimited("ITS virtual pending table not cleaning\n");
+
+       return val;
+}
+
+static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
+{
+       u64 val;
+
+       /* Make sure we wait until the RD is done with the initial scan */
+       val = read_vpend_dirty_clear(vlpi_base);
+       val &= ~GICR_VPENDBASER_Valid;
+       val &= ~clr;
+       val |= set;
+       gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+       val = read_vpend_dirty_clear(vlpi_base);
+       if (unlikely(val & GICR_VPENDBASER_Dirty))
                val |= GICR_VPENDBASER_PendingLast;
-       }
 
        return val;
 }
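
The refactor splits the Dirty-bit wait out of its_clear_vpend_valid() so it can run twice: once before GICR_VPENDBASER is touched (letting the redistributor finish its initial scan) and once after reprogramming it. A standalone sketch of the bounded-poll pattern with a simulated register; the bit position and loop budget are illustrative:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DIRTY (1ULL << 60)              /* illustrative Dirty bit */

    static int reads;

    /* Simulated register: Dirty stays set for the first two reads. */
    static uint64_t read_vpendbaser_sim(void)
    {
            return ++reads <= 2 ? DIRTY : 0;
    }

    static uint64_t poll_dirty_clear(void)
    {
            uint32_t count = 1000000;       /* ~1s at 1us per iteration */
            uint64_t val;
            bool clean;

            do {
                    val = read_vpendbaser_sim();
                    clean = !(val & DIRTY);
                    if (!clean)
                            count--;        /* a udelay(1) would sit here */
            } while (!clean && count);

            if (!clean)
                    fprintf(stderr, "pending table not cleaning\n");
            return val;
    }

    int main(void)
    {
            poll_dirty_clear();             /* wait out the initial scan */
            reads = 0;                      /* pretend we reprogrammed it */
            poll_dirty_clear();             /* wait for the new setting */
            return 0;
    }
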
index 0efe1a9a9f3b234930c2a7a899a4ec16d0d4447f..b252d5534547c006b757dd2fc8db02ff92469b4e 100644 (file)
@@ -206,11 +206,11 @@ static inline void __iomem *gic_dist_base(struct irq_data *d)
        }
 }
 
-static void gic_do_wait_for_rwp(void __iomem *base)
+static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
 {
        u32 count = 1000000;    /* 1s! */
 
-       while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
+       while (readl_relaxed(base + GICD_CTLR) & bit) {
                count--;
                if (!count) {
                        pr_err_ratelimited("RWP timeout, gone fishing\n");
@@ -224,13 +224,13 @@ static void gic_do_wait_for_rwp(void __iomem *base)
 /* Wait for completion of a distributor change */
 static void gic_dist_wait_for_rwp(void)
 {
-       gic_do_wait_for_rwp(gic_data.dist_base);
+       gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
 }
 
 /* Wait for completion of a redistributor change */
 static void gic_redist_wait_for_rwp(void)
 {
-       gic_do_wait_for_rwp(gic_data_rdist_rd_base());
+       gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
 }
 
 #ifdef CONFIG_ARM64
@@ -1466,6 +1466,12 @@ static int gic_irq_domain_translate(struct irq_domain *d,
                if (fwspec->param_count != 2)
                        return -EINVAL;
 
+               if (fwspec->param[0] < 16) {
+                       pr_err(FW_BUG "Illegal GSI%d translation request\n",
+                              fwspec->param[0]);
+                       return -EINVAL;
+               }
+
                *hwirq = fwspec->param[0];
                *type = fwspec->param[1];
 
index 58ba835bee1f393fe4be3b0c3dab04367fc0c9bb..09c710ecc387de31ebd52b1d24be719c3206cfd2 100644 (file)
@@ -1123,6 +1123,12 @@ static int gic_irq_domain_translate(struct irq_domain *d,
                if (fwspec->param_count != 2)
                        return -EINVAL;
 
+               if (fwspec->param[0] < 16) {
+                       pr_err(FW_BUG "Illegal GSI%d translation request\n",
+                              fwspec->param[0]);
+                       return -EINVAL;
+               }
+
                *hwirq = fwspec->param[0];
                *type = fwspec->param[1];
 
index eea5a753618c5e1f5a376d0514a8359507a4ee0d..d30614661eea69242c78a8e46d9952d40508a72c 100644 (file)
@@ -375,7 +375,7 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
        raw_spin_lock_init(&priv->lock);
 
        priv->base = devm_platform_ioremap_resource(pdev, 0);
-       if (!priv->base)
+       if (IS_ERR(priv->base))
                return PTR_ERR(priv->base);
 
        for (i = 0; i < priv->reg_stride; i++) {
index 4081cb6cf7b340bc9e5163d3a9ec877a62401c4c..4277853c753515135c90391be507599cd827f573 100644 (file)
@@ -210,7 +210,7 @@ struct dm_table {
 #define DM_TIO_MAGIC 28714
 struct dm_target_io {
        unsigned short magic;
-       unsigned short flags;
+       blk_short_t flags;
        unsigned int target_bio_nr;
        struct dm_io *io;
        struct dm_target *ti;
@@ -244,7 +244,7 @@ static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
 #define DM_IO_MAGIC 19577
 struct dm_io {
        unsigned short magic;
-       unsigned short flags;
+       blk_short_t flags;
        atomic_t io_count;
        struct mapped_device *md;
        struct bio *orig_bio;
index c58a5111cb575fe8dc9f24dd900f049bafe99ca0..36ae30b73a6e0eae654528741c964c6cb915a814 100644 (file)
@@ -2472,9 +2472,11 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
                                        dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
                                        sec &= ~(sector_t)(ic->sectors_per_block - 1);
                                }
+                               if (unlikely(sec >= ic->provided_data_sectors)) {
+                                       journal_entry_set_unused(je);
+                                       continue;
+                               }
                        }
-                       if (unlikely(sec >= ic->provided_data_sectors))
-                               continue;
                        get_area_and_offset(ic, sec, &area, &offset);
                        restore_last_bytes(ic, access_journal_data(ic, i, j), je);
                        for (k = j + 1; k < ic->journal_section_entries; k++) {
@@ -4397,6 +4399,7 @@ try_smaller_buffer:
        }
 
        if (ic->internal_hash) {
+               size_t recalc_tags_size;
                ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
                if (!ic->recalc_wq ) {
                        ti->error = "Cannot allocate workqueue";
@@ -4410,8 +4413,10 @@ try_smaller_buffer:
                        r = -ENOMEM;
                        goto bad;
                }
-               ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
-                                                ic->tag_size, GFP_KERNEL);
+               recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size;
+               if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
+                       recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
+               ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL);
                if (!ic->recalc_tags) {
                        ti->error = "Cannot allocate tags for recalculating";
                        r = -ENOMEM;
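
The new sizing accounts for the recalculation path writing a full internal-hash digest into the tag buffer before truncating it to tag_size: when the digest is larger than a tag, the final slot needs room for the overhang. A worked example with hypothetical sizes:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed geometry: 8 tag slots of 4 bytes each and a
             * 20-byte (SHA-1-sized) internal hash digest. */
            size_t num_tags = 8, tag_size = 4, digest_size = 20;
            size_t recalc_tags_size = num_tags * tag_size;

            /* The last digest is written whole before truncation, so
             * reserve the part that sticks out past the tag slot. */
            if (digest_size > tag_size)
                    recalc_tags_size += digest_size - tag_size;

            printf("%zu bytes\n", recalc_tags_size);    /* 32 + 16 = 48 */
            return 0;
    }
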
index 901abd6dea41972a2eb9079e9a353268d0c0ee1c..87310fceb0d86302c0324fbd01fb52897a50bec0 100644 (file)
@@ -891,15 +891,21 @@ static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
        struct hash_cell *hc = NULL;
 
        if (*param->uuid) {
-               if (*param->name || param->dev)
+               if (*param->name || param->dev) {
+                       DMERR("Invalid ioctl structure: uuid %s, name %s, dev %llx",
+                             param->uuid, param->name, (unsigned long long)param->dev);
                        return NULL;
+               }
 
                hc = __get_uuid_cell(param->uuid);
                if (!hc)
                        return NULL;
        } else if (*param->name) {
-               if (param->dev)
+               if (param->dev) {
+                       DMERR("Invalid ioctl structure: name %s, dev %llx",
+                             param->name, (unsigned long long)param->dev);
                        return NULL;
+               }
 
                hc = __get_name_cell(param->name);
                if (!hc)
@@ -1851,8 +1857,11 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
        if (copy_from_user(param_kernel, user, minimum_data_size))
                return -EFAULT;
 
-       if (param_kernel->data_size < minimum_data_size)
+       if (param_kernel->data_size < minimum_data_size) {
+               DMERR("Invalid data size in the ioctl structure: %u",
+                     param_kernel->data_size);
                return -EINVAL;
+       }
 
        secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG;
 
index 875bca30a0dd5fb3181b8685441504f3f6ba5d51..82f2a06153dc0689ac317571a39346011afc5224 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/blkdev.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <linux/sched/clock.h>
 
 
 #define DM_MSG_PREFIX  "multipath historical-service-time"
@@ -433,7 +432,7 @@ static struct dm_path *hst_select_path(struct path_selector *ps,
 {
        struct selector *s = ps->context;
        struct path_info *pi = NULL, *best = NULL;
-       u64 time_now = sched_clock();
+       u64 time_now = ktime_get_ns();
        struct dm_path *ret = NULL;
        unsigned long flags;
 
@@ -474,7 +473,7 @@ static int hst_start_io(struct path_selector *ps, struct dm_path *path,
 
 static u64 path_service_time(struct path_info *pi, u64 start_time)
 {
-       u64 sched_now = ktime_get_ns();
+       u64 now = ktime_get_ns();
 
        /* if a previous disk request has finished after this IO was
         * sent to the hardware, pretend the submission happened
@@ -483,11 +482,11 @@ static u64 path_service_time(struct path_info *pi, u64 start_time)
        if (time_after64(pi->last_finish, start_time))
                start_time = pi->last_finish;
 
-       pi->last_finish = sched_now;
-       if (time_before64(sched_now, start_time))
+       pi->last_finish = now;
+       if (time_before64(now, start_time))
                return 0;
 
-       return sched_now - start_time;
+       return now - start_time;
 }
 
 static int hst_end_io(struct path_selector *ps, struct dm_path *path,
index c1ca9be4b79e9f27da01a4c08f88835314b6f1c9..57daa86c19cf0eb4bf7d12c2f3d05e2d09f67db2 100644 (file)
@@ -360,16 +360,20 @@ static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno,
        return 0;
 }
 
+struct orig_bio_details {
+       unsigned int op;
+       unsigned int nr_sectors;
+};
+
 /*
  * First phase of BIO mapping for targets with zone append emulation:
  * check all BIOs that change a zone write pointer and turn zone
  * append operations into regular write operations.
  */
 static bool dm_zone_map_bio_begin(struct mapped_device *md,
-                                 struct bio *orig_bio, struct bio *clone)
+                                 unsigned int zno, struct bio *clone)
 {
        sector_t zsectors = blk_queue_zone_sectors(md->queue);
-       unsigned int zno = bio_zone_no(orig_bio);
        unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
 
        /*
@@ -384,7 +388,7 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
                WRITE_ONCE(md->zwp_offset[zno], zwp_offset);
        }
 
-       switch (bio_op(orig_bio)) {
+       switch (bio_op(clone)) {
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_FINISH:
                return true;
@@ -401,9 +405,8 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
                 * target zone.
                 */
                clone->bi_opf = REQ_OP_WRITE | REQ_NOMERGE |
-                       (orig_bio->bi_opf & (~REQ_OP_MASK));
-               clone->bi_iter.bi_sector =
-                       orig_bio->bi_iter.bi_sector + zwp_offset;
+                       (clone->bi_opf & (~REQ_OP_MASK));
+               clone->bi_iter.bi_sector += zwp_offset;
                break;
        default:
                DMWARN_LIMIT("Invalid BIO operation");
@@ -423,11 +426,10 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
  * data written to a zone. Note that at this point, the remapped clone BIO
  * may already have completed, so we do not touch it.
  */
-static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
-                                       struct bio *orig_bio,
+static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int zno,
+                                       struct orig_bio_details *orig_bio_details,
                                        unsigned int nr_sectors)
 {
-       unsigned int zno = bio_zone_no(orig_bio);
        unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
 
        /* The clone BIO may already have been completed and failed */
@@ -435,7 +437,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
                return BLK_STS_IOERR;
 
        /* Update the zone wp offset */
-       switch (bio_op(orig_bio)) {
+       switch (orig_bio_details->op) {
        case REQ_OP_ZONE_RESET:
                WRITE_ONCE(md->zwp_offset[zno], 0);
                return BLK_STS_OK;
@@ -452,7 +454,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
                 * Check that the target did not truncate the write operation
                 * emulating a zone append.
                 */
-               if (nr_sectors != bio_sectors(orig_bio)) {
+               if (nr_sectors != orig_bio_details->nr_sectors) {
                        DMWARN_LIMIT("Truncated write for zone append");
                        return BLK_STS_IOERR;
                }
@@ -488,7 +490,7 @@ static inline void dm_zone_unlock(struct request_queue *q,
        bio_clear_flag(clone, BIO_ZONE_WRITE_LOCKED);
 }
 
-static bool dm_need_zone_wp_tracking(struct bio *orig_bio)
+static bool dm_need_zone_wp_tracking(struct bio *bio)
 {
        /*
         * Special processing is not needed for operations that do not need the
@@ -496,15 +498,15 @@ static bool dm_need_zone_wp_tracking(struct bio *orig_bio)
         * zones and all operations that do not directly modify a sequential
         * zone write pointer.
         */
-       if (op_is_flush(orig_bio->bi_opf) && !bio_sectors(orig_bio))
+       if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
                return false;
-       switch (bio_op(orig_bio)) {
+       switch (bio_op(bio)) {
        case REQ_OP_WRITE_ZEROES:
        case REQ_OP_WRITE:
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_FINISH:
        case REQ_OP_ZONE_APPEND:
-               return bio_zone_is_seq(orig_bio);
+               return bio_zone_is_seq(bio);
        default:
                return false;
        }
@@ -519,8 +521,8 @@ int dm_zone_map_bio(struct dm_target_io *tio)
        struct dm_target *ti = tio->ti;
        struct mapped_device *md = io->md;
        struct request_queue *q = md->queue;
-       struct bio *orig_bio = io->orig_bio;
        struct bio *clone = &tio->clone;
+       struct orig_bio_details orig_bio_details;
        unsigned int zno;
        blk_status_t sts;
        int r;
@@ -529,18 +531,21 @@ int dm_zone_map_bio(struct dm_target_io *tio)
         * IOs that do not change a zone write pointer do not need
         * any additional special processing.
         */
-       if (!dm_need_zone_wp_tracking(orig_bio))
+       if (!dm_need_zone_wp_tracking(clone))
                return ti->type->map(ti, clone);
 
        /* Lock the target zone */
-       zno = bio_zone_no(orig_bio);
+       zno = bio_zone_no(clone);
        dm_zone_lock(q, zno, clone);
 
+       orig_bio_details.nr_sectors = bio_sectors(clone);
+       orig_bio_details.op = bio_op(clone);
+
        /*
         * Check that the bio and the target zone write pointer offset are
         * both valid, and if the bio is a zone append, remap it to a write.
         */
-       if (!dm_zone_map_bio_begin(md, orig_bio, clone)) {
+       if (!dm_zone_map_bio_begin(md, zno, clone)) {
                dm_zone_unlock(q, zno, clone);
                return DM_MAPIO_KILL;
        }
@@ -560,7 +565,8 @@ int dm_zone_map_bio(struct dm_target_io *tio)
                 * The target submitted the clone BIO. The target zone will
                 * be unlocked on completion of the clone.
                 */
-               sts = dm_zone_map_bio_end(md, orig_bio, *tio->len_ptr);
+               sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
+                                         *tio->len_ptr);
                break;
        case DM_MAPIO_REMAPPED:
                /*
@@ -568,7 +574,8 @@ int dm_zone_map_bio(struct dm_target_io *tio)
                 * unlock the target zone here as the clone will not be
                 * submitted.
                 */
-               sts = dm_zone_map_bio_end(md, orig_bio, *tio->len_ptr);
+               sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
+                                         *tio->len_ptr);
                if (sts != BLK_STS_OK)
                        dm_zone_unlock(q, zno, clone);
                break;
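
The recurring change in this file is a snapshot pattern: the clone's op and sector count are captured into orig_bio_details before ->map() is called because, as the surrounding comments note, the clone may already have completed by the time dm_zone_map_bio_end() runs. A trivial sketch of the idea with illustrative types:

    #include <stdio.h>

    struct bio_sketch {
            unsigned int op;
            unsigned int nr_sectors;
    };

    struct orig_bio_details {
            unsigned int op;
            unsigned int nr_sectors;
    };

    int main(void)
    {
            struct bio_sketch clone = { 1 /* WRITE */, 8 };
            struct orig_bio_details details = {
                    .op = clone.op,
                    .nr_sectors = clone.nr_sectors,
            };

            /* The clone may complete and be recycled while mapped... */
            clone.op = 0;
            clone.nr_sectors = 0;

            /* ...but the end-of-map accounting sees the saved values. */
            printf("op=%u sectors=%u\n", details.op, details.nr_sectors);
            return 0;
    }
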
index ad2e0bbeb559a584434a1b90da504ab0b7708c71..82957bd460e894556fed5eed0b991c14f3c0713b 100644 (file)
@@ -892,13 +892,19 @@ static void dm_io_complete(struct dm_io *io)
        if (unlikely(wq_has_sleeper(&md->wait)))
                wake_up(&md->wait);
 
-       if (io_error == BLK_STS_DM_REQUEUE) {
-               /*
-                * Upper layer won't help us poll split bio, io->orig_bio
-                * may only reflect a subset of the pre-split original,
-                * so clear REQ_POLLED in case of requeue
-                */
-               bio->bi_opf &= ~REQ_POLLED;
+       if (io_error == BLK_STS_DM_REQUEUE || io_error == BLK_STS_AGAIN) {
+               if (bio->bi_opf & REQ_POLLED) {
+                       /*
+                        * Upper layer won't help us poll split bio (io->orig_bio
+                        * may only reflect a subset of the pre-split original)
+                        * so clear REQ_POLLED in case of requeue.
+                        */
+                       bio->bi_opf &= ~REQ_POLLED;
+                       if (io_error == BLK_STS_AGAIN) {
+                               /* io_uring doesn't handle BLK_STS_AGAIN (yet) */
+                               queue_io(md, bio);
+                       }
+               }
                return;
        }
 
@@ -1317,8 +1323,7 @@ static void __map_bio(struct bio *clone)
 }
 
 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
-                               struct dm_target *ti, unsigned num_bios,
-                               unsigned *len)
+                               struct dm_target *ti, unsigned num_bios)
 {
        struct bio *bio;
        int try;
@@ -1329,7 +1334,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
                if (try)
                        mutex_lock(&ci->io->md->table_devices_lock);
                for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
-                       bio = alloc_tio(ci, ti, bio_nr, len,
+                       bio = alloc_tio(ci, ti, bio_nr, NULL,
                                        try ? GFP_NOIO : GFP_NOWAIT);
                        if (!bio)
                                break;
@@ -1357,11 +1362,11 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
                break;
        case 1:
                clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
-               dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
                __map_bio(clone);
                break;
        default:
-               alloc_multiple_bios(&blist, ci, ti, num_bios, len);
+               /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
+               alloc_multiple_bios(&blist, ci, ti, num_bios);
                while ((clone = bio_list_pop(&blist))) {
                        dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
                        __map_bio(clone);
@@ -1386,6 +1391,7 @@ static void __send_empty_flush(struct clone_info *ci)
 
        ci->bio = &flush_bio;
        ci->sector_count = 0;
+       ci->io->tio.clone.bi_iter.bi_size = 0;
 
        while ((ti = dm_table_get_target(ci->map, target_nr++)))
                __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
@@ -1401,14 +1407,10 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
        len = min_t(sector_t, ci->sector_count,
                    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
 
-       /*
-        * dm_accept_partial_bio cannot be used with duplicate bios,
-        * so update clone_info cursor before __send_duplicate_bios().
-        */
+       __send_duplicate_bios(ci, ti, num_bios, &len);
+
        ci->sector += len;
        ci->sector_count -= len;
-
-       __send_duplicate_bios(ci, ti, num_bios, &len);
 }
 
 static bool is_abnormal_io(struct bio *bio)
index 28f2bafc14d281ee3723771ae7a9ab225e020033..5afa373e534f23052caccd735e1bc92f23051a19 100644 (file)
@@ -7,6 +7,7 @@ comment "NXP media platform drivers"
 config VIDEO_IMX_MIPI_CSIS
        tristate "NXP MIPI CSI-2 CSIS receiver found on i.MX7 and i.MX8 models"
        depends on ARCH_MXC || COMPILE_TEST
+       depends on VIDEO_DEV
        select MEDIA_CONTROLLER
        select V4L2_FWNODE
        select VIDEO_V4L2_SUBDEV_API
index 4de5e8d2b261bd9939ec0a20f2e54c75b8fb45b2..3d3d1062e2122b49d166c3fe7e3b045ff370e740 100644 (file)
@@ -892,7 +892,7 @@ static int rga_probe(struct platform_device *pdev)
        }
        rga->dst_mmu_pages =
                (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
-       if (rga->dst_mmu_pages) {
+       if (!rga->dst_mmu_pages) {
                ret = -ENOMEM;
                goto free_src_pages;
        }
index 47029746b89eedf0d7a6beedb06a2f60ac5865c3..0de587b412d4e7deaa715eeca94ab17cd273f6dd 100644 (file)
@@ -77,16 +77,16 @@ err_mutex_unlock:
 }
 
 static const struct si2157_tuner_info si2157_tuners[] = {
-       { SI2141, false, 0x60, SI2141_60_FIRMWARE, SI2141_A10_FIRMWARE },
-       { SI2141, false, 0x61, SI2141_61_FIRMWARE, SI2141_A10_FIRMWARE },
-       { SI2146, false, 0x11, SI2146_11_FIRMWARE, NULL },
-       { SI2147, false, 0x50, SI2147_50_FIRMWARE, NULL },
-       { SI2148, true,  0x32, SI2148_32_FIRMWARE, SI2158_A20_FIRMWARE },
-       { SI2148, true,  0x33, SI2148_33_FIRMWARE, SI2158_A20_FIRMWARE },
-       { SI2157, false, 0x50, SI2157_50_FIRMWARE, SI2157_A30_FIRMWARE },
-       { SI2158, false, 0x50, SI2158_50_FIRMWARE, SI2158_A20_FIRMWARE },
-       { SI2158, false, 0x51, SI2158_51_FIRMWARE, SI2158_A20_FIRMWARE },
-       { SI2177, false, 0x50, SI2177_50_FIRMWARE, SI2157_A30_FIRMWARE },
+       { SI2141, 0x60, false, SI2141_60_FIRMWARE, SI2141_A10_FIRMWARE },
+       { SI2141, 0x61, false, SI2141_61_FIRMWARE, SI2141_A10_FIRMWARE },
+       { SI2146, 0x11, false, SI2146_11_FIRMWARE, NULL },
+       { SI2147, 0x50, false, SI2147_50_FIRMWARE, NULL },
+       { SI2148, 0x32, true,  SI2148_32_FIRMWARE, SI2158_A20_FIRMWARE },
+       { SI2148, 0x33, true,  SI2148_33_FIRMWARE, SI2158_A20_FIRMWARE },
+       { SI2157, 0x50, false, SI2157_50_FIRMWARE, SI2157_A30_FIRMWARE },
+       { SI2158, 0x50, false, SI2158_50_FIRMWARE, SI2158_A20_FIRMWARE },
+       { SI2158, 0x51, false, SI2158_51_FIRMWARE, SI2158_A20_FIRMWARE },
+       { SI2177, 0x50, false, SI2177_50_FIRMWARE, SI2157_A30_FIRMWARE },
 };
 
 static int si2157_load_firmware(struct dvb_frontend *fe,
@@ -178,7 +178,7 @@ static int si2157_find_and_load_firmware(struct dvb_frontend *fe)
                }
        }
 
-       if (!fw_name && !fw_alt_name) {
+       if (required && !fw_name && !fw_alt_name) {
                dev_err(&client->dev,
                        "unknown chip version Si21%d-%c%c%c ROM 0x%02x\n",
                        part_id, cmd.args[1], cmd.args[3], cmd.args[4], rom_id);
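
The si2157 table fix swaps two columns that had drifted out of sync with the struct's field order, the classic hazard of positional initializers. C99 designated initializers make the order irrelevant; a sketch with an assumed struct layout and a made-up firmware name:

    #include <stdbool.h>

    struct tuner_info {
            int part_id;
            unsigned char rom_id;
            bool required;
            const char *fw_name;
    };

    /* With designated initializers, reordering the struct (or the
     * initializer) cannot silently shear a row's columns apart. */
    static const struct tuner_info tuners[] = {
            {
                    .part_id  = 2141,
                    .rom_id   = 0x60,
                    .required = false,
                    .fw_name  = "si2141-example.fw",    /* hypothetical */
            },
    };

    int main(void)
    {
            return tuners[0].required ? 1 : 0;
    }
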
index c267283b01fdaf74c2ef094926fd65da72af0ca5..e749dcb3ddea93335a6ca5f5dd6e1de0e9a52418 100644 (file)
@@ -544,20 +544,27 @@ static int atmel_ebi_probe(struct platform_device *pdev)
        smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0);
 
        ebi->smc.regmap = syscon_node_to_regmap(smc_np);
-       if (IS_ERR(ebi->smc.regmap))
-               return PTR_ERR(ebi->smc.regmap);
+       if (IS_ERR(ebi->smc.regmap)) {
+               ret = PTR_ERR(ebi->smc.regmap);
+               goto put_node;
+       }
 
        ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np);
-       if (IS_ERR(ebi->smc.layout))
-               return PTR_ERR(ebi->smc.layout);
+       if (IS_ERR(ebi->smc.layout)) {
+               ret = PTR_ERR(ebi->smc.layout);
+               goto put_node;
+       }
 
        ebi->smc.clk = of_clk_get(smc_np, 0);
        if (IS_ERR(ebi->smc.clk)) {
-               if (PTR_ERR(ebi->smc.clk) != -ENOENT)
-                       return PTR_ERR(ebi->smc.clk);
+               if (PTR_ERR(ebi->smc.clk) != -ENOENT) {
+                       ret = PTR_ERR(ebi->smc.clk);
+                       goto put_node;
+               }
 
                ebi->smc.clk = NULL;
        }
+       of_node_put(smc_np);
        ret = clk_prepare_enable(ebi->smc.clk);
        if (ret)
                return ret;
@@ -608,6 +615,10 @@ static int atmel_ebi_probe(struct platform_device *pdev)
        }
 
        return of_platform_populate(np, NULL, NULL, dev);
+
+put_node:
+       of_node_put(smc_np);
+       return ret;
 }
 
 static __maybe_unused int atmel_ebi_resume(struct device *dev)
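
The atmel-ebi change is the standard goto-unwind for device-tree references: of_parse_phandle() takes a reference on smc_np that every exit path, success or failure, must drop with of_node_put(). A standalone sketch of the shape, using an ordinary allocation in place of the node reference:

    #include <stdio.h>
    #include <stdlib.h>

    static int probe_sketch(int fail_step)
    {
            int ret = 0;
            char *node = malloc(16);    /* stands in for of_parse_phandle() */

            if (!node)
                    return -12;         /* -ENOMEM */

            if (fail_step == 1) {
                    ret = -22;          /* -EINVAL */
                    goto put_node;
            }
            if (fail_step == 2) {
                    ret = -19;          /* -ENODEV */
                    goto put_node;
            }

            /* The success path falls through and drops the reference
             * once nothing derived from it is needed any longer. */
    put_node:
            free(node);                 /* stands in for of_node_put() */
            return ret;
    }

    int main(void)
    {
            printf("%d %d %d\n", probe_sketch(0), probe_sketch(1),
                   probe_sketch(2));    /* 0 -22 -19 */
            return 0;
    }
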
index 2f6939da21cdcef233be890d16fb8aadfe13a3ee..e83b61c925a4fdd061c89f8901a81bfeb50a7c3a 100644 (file)
@@ -287,8 +287,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
        }
 
        /* legacy dts may still use "simple-bus" compatible */
-       ret = of_platform_populate(dev->dev.of_node, NULL, NULL,
-                                       &dev->dev);
+       ret = of_platform_default_populate(dev->dev.of_node, NULL, &dev->dev);
        if (ret)
                goto err_free_nandirq;
 
index e4cc64f560196d5579839e53dc7fc6d100c5f48e..2e545f473cc68fb3bd675518027cb3bfeaef46eb 100644 (file)
@@ -651,6 +651,7 @@ static int rpcif_probe(struct platform_device *pdev)
        struct platform_device *vdev;
        struct device_node *flash;
        const char *name;
+       int ret;
 
        flash = of_get_next_child(pdev->dev.of_node, NULL);
        if (!flash) {
@@ -674,7 +675,14 @@ static int rpcif_probe(struct platform_device *pdev)
                return -ENOMEM;
        vdev->dev.parent = &pdev->dev;
        platform_set_drvdata(pdev, vdev);
-       return platform_device_add(vdev);
+
+       ret = platform_device_add(vdev);
+       if (ret) {
+               platform_device_put(vdev);
+               return ret;
+       }
+
+       return 0;
 }
 
 static int rpcif_remove(struct platform_device *pdev)
index e90adfa5795050bdd41a48005d088e793d9bcd24..9b3ba2df71c75b913768139272b00d232151a174 100644 (file)
@@ -6658,13 +6658,13 @@ static int mpt_summary_proc_show(struct seq_file *m, void *v)
 static int mpt_version_proc_show(struct seq_file *m, void *v)
 {
        u8       cb_idx;
-       int      scsi, fc, sas, lan, ctl, targ, dmp;
+       int      scsi, fc, sas, lan, ctl, targ;
        char    *drvname;
 
        seq_printf(m, "%s-%s\n", "mptlinux", MPT_LINUX_VERSION_COMMON);
        seq_printf(m, "  Fusion MPT base driver\n");
 
-       scsi = fc = sas = lan = ctl = targ = dmp = 0;
+       scsi = fc = sas = lan = ctl = targ = 0;
        for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
                drvname = NULL;
                if (MptCallbacks[cb_idx]) {
index e008d82e4ba3a276de80a3fa81e9a23d397ddb91..a13506dd81194532fa047da82423fe9946d8a2a5 100644 (file)
@@ -111,10 +111,10 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
 
        if (contiguous) {
                if (is_power_of_2(page_size))
-                       paddr = (u64) (uintptr_t) gen_pool_dma_alloc_align(vm->dram_pg_pool,
-                                                               total_size, NULL, page_size);
+                       paddr = (uintptr_t) gen_pool_dma_alloc_align(vm->dram_pg_pool,
+                                                                    total_size, NULL, page_size);
                else
-                       paddr = (u64) (uintptr_t) gen_pool_alloc(vm->dram_pg_pool, total_size);
+                       paddr = gen_pool_alloc(vm->dram_pg_pool, total_size);
                if (!paddr) {
                        dev_err(hdev->dev,
                                "failed to allocate %llu contiguous pages with total size of %llu\n",
@@ -150,12 +150,12 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
                for (i = 0 ; i < num_pgs ; i++) {
                        if (is_power_of_2(page_size))
                                phys_pg_pack->pages[i] =
-                                               (u64) gen_pool_dma_alloc_align(vm->dram_pg_pool,
-                                                                               page_size, NULL,
-                                                                               page_size);
+                                       (uintptr_t)gen_pool_dma_alloc_align(vm->dram_pg_pool,
+                                                                           page_size, NULL,
+                                                                           page_size);
                        else
-                               phys_pg_pack->pages[i] = (u64) gen_pool_alloc(vm->dram_pg_pool,
-                                                                               page_size);
+                               phys_pg_pack->pages[i] = gen_pool_alloc(vm->dram_pg_pool,
+                                                                       page_size);
                        if (!phys_pg_pack->pages[i]) {
                                dev_err(hdev->dev,
                                        "Failed to allocate device memory (out of memory)\n");
index 4e67c1403cc93bb48302fe8747deec8a7195cea8..506dc900f5c7c391ef8c04dd0158754c0feba705 100644 (file)
@@ -993,7 +993,7 @@ static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
                return -EEXIST;
 
        md->reset_done |= type;
-       err = mmc_hw_reset(host);
+       err = mmc_hw_reset(host->card);
        /* Ensure we switch back to the correct partition */
        if (err) {
                struct mmc_blk_data *main_md =
@@ -1880,6 +1880,31 @@ static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
               brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
 }
 
+static int mmc_spi_err_check(struct mmc_card *card)
+{
+       u32 status = 0;
+       int err;
+
+       /*
+        * SPI does not have a TRAN state that we could wait on; instead,
+        * the card is ready again when it no longer holds the line LOW.
+        * We still have to ensure two things here before we know the write
+        * was successful:
+        * 1. The card may have disconnected during busy, leaving us reading
+        * our own pull-up and thinking it was still connected, so ensure
+        * it still responds.
+        * 2. Check for any error bits, in particular R1_SPI_IDLE, to catch
+        * a card that just reconnected after being disconnected during busy.
+        */
+       err = __mmc_send_status(card, &status, 0);
+       if (err)
+               return err;
+       /* All R1 and R2 bits of SPI are errors in our case */
+       if (status)
+               return -EIO;
+       return 0;
+}
+
 static int mmc_blk_busy_cb(void *cb_data, bool *busy)
 {
        struct mmc_blk_busy_data *data = cb_data;
@@ -1903,9 +1928,16 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
        struct mmc_blk_busy_data cb_data;
        int err;
 
-       if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
+       if (rq_data_dir(req) == READ)
                return 0;
 
+       if (mmc_host_is_spi(card->host)) {
+               err = mmc_spi_err_check(card);
+               if (err)
+                       mqrq->brq.data.bytes_xfered = 0;
+               return err;
+       }
+
        cb_data.card = card;
        cb_data.status = 0;
        err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS,
@@ -2350,6 +2382,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
        struct mmc_blk_data *md;
        int devidx, ret;
        char cap_str[10];
+       bool cache_enabled = false;
+       bool fua_enabled = false;
 
        devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
        if (devidx < 0) {
@@ -2429,13 +2463,17 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
                        md->flags |= MMC_BLK_CMD23;
        }
 
-       if (mmc_card_mmc(card) &&
-           md->flags & MMC_BLK_CMD23 &&
+       if (md->flags & MMC_BLK_CMD23 &&
            ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
             card->ext_csd.rel_sectors)) {
                md->flags |= MMC_BLK_REL_WR;
-               blk_queue_write_cache(md->queue.queue, true, true);
+               fua_enabled = true;
+               cache_enabled = true;
        }
+       if (mmc_cache_enabled(card->host))
+               cache_enabled  = true;
+
+       blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled);
 
        string_get_size((u64)size, 512, STRING_UNITS_2,
                        cap_str, sizeof(cap_str));
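
The hunk above decouples the two write-cache flags: FUA is only advertised when the card supports reliable writes, while the volatile write cache is advertised when either reliable writes or the card's own cache (mmc_cache_enabled()) is active; previously both flags were set together and only for eMMC. A sketch of the decision with hypothetical inputs:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            /* Assumed card: no reliable writes, internal cache enabled. */
            bool reliable_write = false;
            bool card_cache_on  = true;
            bool cache_enabled = false, fua_enabled = false;

            if (reliable_write) {
                    fua_enabled   = true;
                    cache_enabled = true;
            }
            if (card_cache_on)
                    cache_enabled = true;

            /* Mirrors blk_queue_write_cache(q, cache_enabled, fua_enabled). */
            printf("cache=%d fua=%d\n", cache_enabled, fua_enabled);
            return 0;                   /* prints cache=1 fua=0 */
    }
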
index 368f10405e132ceedfb722278271bf99e422afeb..c6ae16d40766804357ef51dc01ec50b7a9716a21 100644 (file)
@@ -1995,7 +1995,7 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
 
 /**
  * mmc_hw_reset - reset the card in hardware
- * @host: MMC host to which the card is attached
+ * @card: card to be reset
  *
  * Hard reset the card. This function is only for upper layers, like the
  * block layer or card drivers. You cannot use it in host drivers (struct
@@ -2003,8 +2003,9 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
  *
  * Return: 0 on success, -errno on failure
  */
-int mmc_hw_reset(struct mmc_host *host)
+int mmc_hw_reset(struct mmc_card *card)
 {
+       struct mmc_host *host = card->host;
        int ret;
 
        ret = host->bus_ops->hw_reset(host);
index e6a2fd2c6d5c94b62374314c3cd11ed19502abdb..8d9bceeff9864b02c7488432e0cb5d8399b70608 100644 (file)
@@ -2325,10 +2325,9 @@ static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
 static int mmc_test_reset(struct mmc_test_card *test)
 {
        struct mmc_card *card = test->card;
-       struct mmc_host *host = card->host;
        int err;
 
-       err = mmc_hw_reset(host);
+       err = mmc_hw_reset(card);
        if (!err) {
                /*
                 * Reset will re-enable the card's command queue, but tests
index 9c13f2c313658b5dd05ef23b7796b42a93c8117a..4566d7fc9055af49e4f4b98bb4edf65856c9d4c4 100644 (file)
@@ -62,8 +62,8 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
         * except for the last element, which has no constraint on idmasize
         */
        for_each_sg(data->sg, sg, data->sg_len - 1, i) {
-               if (!IS_ALIGNED(data->sg->offset, sizeof(u32)) ||
-                   !IS_ALIGNED(data->sg->length, SDMMC_IDMA_BURST)) {
+               if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
+                   !IS_ALIGNED(sg->length, SDMMC_IDMA_BURST)) {
                        dev_err(mmc_dev(host->mmc),
                                "unaligned scatterlist: ofst:%x length:%d\n",
                                data->sg->offset, data->sg->length);
@@ -71,7 +71,7 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
                }
        }
 
-       if (!IS_ALIGNED(data->sg->offset, sizeof(u32))) {
+       if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
                dev_err(mmc_dev(host->mmc),
                        "unaligned last scatterlist: ofst:%x length:%d\n",
                        data->sg->offset, data->sg->length);
index 2797a9c0f17d86f3ee774a45d02d3566f4ed723e..ddb5ca2f559e2b9b10d985e39ac803643807818f 100644 (file)
@@ -144,9 +144,9 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
                return clk_get_rate(priv->clk);
 
        if (priv->clkh) {
+               /* HS400 with 4TAP needs different clock settings */
                bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
-               bool need_slow_clkh = (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) ||
-                                     (host->mmc->ios.timing == MMC_TIMING_MMC_HS400);
+               bool need_slow_clkh = host->mmc->ios.timing == MMC_TIMING_MMC_HS400;
                clkh_shift = use_4tap && need_slow_clkh ? 1 : 2;
                ref_clk = priv->clkh;
        }
@@ -396,10 +396,10 @@ static void renesas_sdhi_hs400_complete(struct mmc_host *mmc)
                        SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) |
                        sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2));
 
-       /* Set the sampling clock selection range of HS400 mode */
        sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
                       SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN |
-                      0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
+                      sd_scc_read32(host, priv,
+                                    SH_MOBILE_SDHI_SCC_DTCNTL));
 
        /* Avoid bad TAP */
        if (bad_taps & BIT(priv->tap_set)) {
index 666cee4c7f7c672de22a86668987d5359514756d..08e838400b526b42020fce0e869d568a2ddfd014 100644 (file)
@@ -241,16 +241,6 @@ static void xenon_voltage_switch(struct sdhci_host *host)
 {
        /* Wait for 5ms after setting the 1.8V signal enable bit */
        usleep_range(5000, 5500);
-
-       /*
-        * For some reason the controller's Host Control2 register reports
-        * the bit representing 1.8V signaling as 0 when read after it was
-        * written as 1. Subsequent read reports 1.
-        *
-        * Since this may cause some issues, do an empty read of the Host
-        * Control2 register here to circumvent this.
-        */
-       sdhci_readw(host, SDHCI_HOST_CONTROL2);
 }
 
 static unsigned int xenon_get_max_clock(struct sdhci_host *host)
index a7e3eb9befb628482e0afc1e9d905b9eb83519e0..a32050fecabf308e113524427e41fc55ef30178c 100644 (file)
@@ -351,9 +351,6 @@ static ssize_t dev_attribute_show(struct device *dev,
         * we still can use 'ubi->ubi_num'.
         */
        ubi = container_of(dev, struct ubi_device, dev);
-       ubi = ubi_get_device(ubi->ubi_num);
-       if (!ubi)
-               return -ENODEV;
 
        if (attr == &dev_eraseblock_size)
                ret = sprintf(buf, "%d\n", ubi->leb_size);
@@ -382,7 +379,6 @@ static ssize_t dev_attribute_show(struct device *dev,
        else
                ret = -EINVAL;
 
-       ubi_put_device(ubi);
        return ret;
 }
 
@@ -979,9 +975,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
                        goto out_detach;
        }
 
-       /* Make device "available" before it becomes accessible via sysfs */
-       ubi_devices[ubi_num] = ubi;
-
        err = uif_init(ubi);
        if (err)
                goto out_detach;
@@ -1026,6 +1019,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
        wake_up_process(ubi->bgt_thread);
        spin_unlock(&ubi->wl_lock);
 
+       ubi_devices[ubi_num] = ubi;
        ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
        return ubi_num;
 
@@ -1034,7 +1028,6 @@ out_debugfs:
 out_uif:
        uif_close(ubi);
 out_detach:
-       ubi_devices[ubi_num] = NULL;
        ubi_wl_close(ubi);
        ubi_free_all_volumes(ubi);
        vfree(ubi->vtbl);
index 022af59906aa9a6d2b9bfd59e375964c21b3e884..6b5f1ffd961b9430e553e2564f7163598f3a1b63 100644 (file)
@@ -468,7 +468,9 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
                        if (err == UBI_IO_FF_BITFLIPS)
                                scrub = 1;
 
-                       add_aeb(ai, free, pnum, ec, scrub);
+                       ret = add_aeb(ai, free, pnum, ec, scrub);
+                       if (ret)
+                               goto out;
                        continue;
                } else if (err == 0 || err == UBI_IO_BITFLIPS) {
                        dbg_bld("Found non empty PEB:%i in pool", pnum);
@@ -638,8 +640,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
                if (fm_pos >= fm_size)
                        goto fail_bad;
 
-               add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
-                       be32_to_cpu(fmec->ec), 0);
+               ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
+                             be32_to_cpu(fmec->ec), 0);
+               if (ret)
+                       goto fail;
        }
 
        /* read EC values from used list */
@@ -649,8 +653,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
                if (fm_pos >= fm_size)
                        goto fail_bad;
 
-               add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
-                       be32_to_cpu(fmec->ec), 0);
+               ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+                             be32_to_cpu(fmec->ec), 0);
+               if (ret)
+                       goto fail;
        }
 
        /* read EC values from scrub list */
@@ -660,8 +666,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
                if (fm_pos >= fm_size)
                        goto fail_bad;
 
-               add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
-                       be32_to_cpu(fmec->ec), 1);
+               ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+                             be32_to_cpu(fmec->ec), 1);
+               if (ret)
+                       goto fail;
        }
 
        /* read EC values from erase list */
@@ -671,8 +679,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
                if (fm_pos >= fm_size)
                        goto fail_bad;
 
-               add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
-                       be32_to_cpu(fmec->ec), 1);
+               ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
+                             be32_to_cpu(fmec->ec), 1);
+               if (ret)
+                       goto fail;
        }
 
        ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
index 139ee132bfbcfa432bb8b75f3df93858900c07f9..1bc7b3a05604628ebd3a334f8be766efa7ecf471 100644 (file)
@@ -56,16 +56,11 @@ static ssize_t vol_attribute_show(struct device *dev,
 {
        int ret;
        struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
-       struct ubi_device *ubi;
-
-       ubi = ubi_get_device(vol->ubi->ubi_num);
-       if (!ubi)
-               return -ENODEV;
+       struct ubi_device *ubi = vol->ubi;
 
        spin_lock(&ubi->volumes_lock);
        if (!ubi->volumes[vol->vol_id]) {
                spin_unlock(&ubi->volumes_lock);
-               ubi_put_device(ubi);
                return -ENODEV;
        }
        /* Take a reference to prevent volume removal */
@@ -103,7 +98,6 @@ static ssize_t vol_attribute_show(struct device *dev,
        vol->ref_count -= 1;
        ubi_assert(vol->ref_count >= 0);
        spin_unlock(&ubi->volumes_lock);
-       ubi_put_device(ubi);
        return ret;
 }
 
index 15eddca7b4b6623ccb2b17d1d8f9a092ebd90ff5..38e152548126101b1c7a8f85014c55fed4d88de0 100644 (file)
@@ -4027,14 +4027,19 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const v
        return true;
 }
 
-static u32 bond_ip_hash(u32 hash, struct flow_keys *flow)
+static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy)
 {
        hash ^= (__force u32)flow_get_u32_dst(flow) ^
                (__force u32)flow_get_u32_src(flow);
        hash ^= (hash >> 16);
        hash ^= (hash >> 8);
+
        /* discard lowest hash bit to deal with the common even ports pattern */
-       return hash >> 1;
+       if (xmit_policy == BOND_XMIT_POLICY_LAYER34 ||
+               xmit_policy == BOND_XMIT_POLICY_ENCAP34)
+               return hash >> 1;
+
+       return hash;
 }
 
 /* Generate hash based on xmit policy. If @skb is given it is used to linearize
@@ -4064,7 +4069,7 @@ static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const voi
                        memcpy(&hash, &flow.ports.ports, sizeof(hash));
        }
 
-       return bond_ip_hash(hash, &flow);
+       return bond_ip_hash(hash, &flow, bond->params.xmit_policy);
 }
 
 /**
@@ -5259,7 +5264,7 @@ static u32 bond_sk_hash_l34(struct sock *sk)
        /* L4 */
        memcpy(&hash, &flow.ports.ports, sizeof(hash));
        /* L3 */
-       return bond_ip_hash(hash, &flow);
+       return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34);
 }
 
 static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
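
The bonding change makes the lowest-bit discard conditional: the shift exists to counter the even-port bias of layer 3+4 hashing, but for pure L2/L3 policies it only threw away a bit of address entropy. A self-contained sketch of the fold; the addresses, seed, and reduced policy set are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    enum xmit_policy { POLICY_LAYER23, POLICY_LAYER34 };

    static uint32_t bond_ip_hash_sketch(uint32_t hash, uint32_t src,
                                        uint32_t dst, enum xmit_policy policy)
    {
            hash ^= dst ^ src;
            hash ^= hash >> 16;
            hash ^= hash >> 8;

            /* Drop the low bit only when ports fed the hash, where even
             * port numbers would otherwise bias it. */
            if (policy == POLICY_LAYER34)
                    return hash >> 1;
            return hash;
    }

    int main(void)
    {
            uint32_t src = 0x0a000001, dst = 0x0a000002;    /* 10.0.0.1/.2 */

            printf("l23: %u\n",
                   bond_ip_hash_sketch(0, src, dst, POLICY_LAYER23));
            printf("l34: %u\n",
                   bond_ip_hash_sketch(0x1f90, src, dst, POLICY_LAYER34));
            return 0;
    }
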
index a416240d001b71a554c1e1636ca764fa5cff1617..12c15da55664b9de63e2bef6e46477700289f0da 100644 (file)
@@ -1681,9 +1681,6 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
                break;
        case PHY_INTERFACE_MODE_RMII:
                miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
-
-               /* Configure the RMII clock as output: */
-               miicfg |= GSWIP_MII_CFG_RMII_CLK;
                break;
        case PHY_INTERFACE_MODE_RGMII:
        case PHY_INTERFACE_MODE_RGMII_ID:
index b49d05f0e11795718f2f4daf7810f54d218a5c0d..7a9f9ff6dedf38ca56d94159a42be1d20e99a2bf 100644 (file)
@@ -40,8 +40,9 @@ int mv88e6xxx_port_hidden_wait(struct mv88e6xxx_chip *chip)
 {
        int bit = __bf_shf(MV88E6XXX_PORT_RESERVED_1A_BUSY);
 
-       return mv88e6xxx_wait_bit(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
-                                 MV88E6XXX_PORT_RESERVED_1A, bit, 0);
+       return mv88e6xxx_port_wait_bit(chip,
+                                      MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
+                                      MV88E6XXX_PORT_RESERVED_1A, bit, 0);
 }
 
 int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port,
index 413b0006e9a211fd8cf37f76f966c67545b47224..9e28219b223df95473dd118af43cc63b85fa4a3b 100644 (file)
@@ -670,6 +670,8 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
        struct ocelot *ocelot = ds->priv;
        struct felix *felix = ocelot_to_felix(ocelot);
        enum dsa_tag_protocol old_proto = felix->tag_proto;
+       bool cpu_port_active = false;
+       struct dsa_port *dp;
        int err;
 
        if (proto != DSA_TAG_PROTO_SEVILLE &&
@@ -677,6 +679,27 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
            proto != DSA_TAG_PROTO_OCELOT_8021Q)
                return -EPROTONOSUPPORT;
 
+       /* We don't support multiple CPU ports, yet the DT blob may have
+        * multiple CPU ports defined. The first CPU port is the active one,
+        * the others are inactive. In this case, DSA will call
+        * ->change_tag_protocol() multiple times, once per CPU port.
+        * Since we implement the tagging protocol change towards "ocelot" or
+        * "seville" as initializing the NPI port, we would otherwise end up
+        * changing the NPI port to whichever unused DSA CPU port @cpu was
+        * passed last, rather than the one that should actively pass
+        * traffic.
+        * Suppress DSA's calls on CPU ports that are inactive.
+        */
+       dsa_switch_for_each_user_port(dp, ds) {
+               if (dp->cpu_dp->index == cpu) {
+                       cpu_port_active = true;
+                       break;
+               }
+       }
+
+       if (!cpu_port_active)
+               return 0;
+
        felix_del_tag_protocol(ds, cpu, old_proto);
 
        err = felix_set_tag_protocol(ds, cpu, proto);
index 8d382b27e625737a9e2f75d1805755bef5319b71..52a8566071eddd8e63d79ce20febf879537a88b9 100644 (file)
@@ -2316,7 +2316,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
 
        err = dsa_register_switch(ds);
        if (err) {
-               dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
+               dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n");
                goto err_register_ds;
        }
 
index 1aa79735355f13030328377f0f92369bff70f2e4..060165a85fb7d74015100b38975aea84afd60fe4 100644 (file)
@@ -9,34 +9,46 @@ menuconfig NET_DSA_REALTEK
        help
          Select to enable support for Realtek Ethernet switch chips.
 
+         Note that at least one interface driver must be enabled for the
+         subdrivers to be loaded. Moreover, an interface driver cannot achieve
+         anything without at least one subdriver enabled.
+
+if NET_DSA_REALTEK
+
 config NET_DSA_REALTEK_MDIO
-       tristate "Realtek MDIO connected switch driver"
-       depends on NET_DSA_REALTEK
+       tristate "Realtek MDIO interface driver"
        depends on OF
+       depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
+       depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
+       depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
        help
          Select to enable support for registering switches configured
          through MDIO.
 
 config NET_DSA_REALTEK_SMI
-       tristate "Realtek SMI connected switch driver"
-       depends on NET_DSA_REALTEK
+       tristate "Realtek SMI interface driver"
        depends on OF
+       depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
+       depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
+       depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
        help
          Select to enable support for registering switches connected
          through SMI.
 
 config NET_DSA_REALTEK_RTL8365MB
        tristate "Realtek RTL8365MB switch subdriver"
-       depends on NET_DSA_REALTEK
-       depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
+       imply NET_DSA_REALTEK_SMI
+       imply NET_DSA_REALTEK_MDIO
        select NET_DSA_TAG_RTL8_4
        help
          Select to enable support for Realtek RTL8365MB-VC and RTL8367S.
 
 config NET_DSA_REALTEK_RTL8366RB
        tristate "Realtek RTL8366RB switch subdriver"
-       depends on NET_DSA_REALTEK
-       depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
+       imply NET_DSA_REALTEK_SMI
+       imply NET_DSA_REALTEK_MDIO
        select NET_DSA_TAG_RTL4_A
        help
-         Select to enable support for Realtek RTL8366RB
+         Select to enable support for Realtek RTL8366RB.
+
+endif
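
A note on the unusual dependency pairs above: in Kconfig's tristate logic, "FOO || !FOO" is not a tautology. When FOO=m the expression evaluates to m, which caps the depending symbol at m as well, so the MDIO/SMI interface drivers can only be built-in (y) if every enabled subdriver is also built-in; a built-in interface driver can therefore never end up referencing symbols that live in a subdriver module. The first "depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB" line additionally requires at least one subdriver to be enabled at all, matching the help text at the top of the menu.
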
index 31e1f100e48e98887ca2c4d2f333e014ec73829f..c58f49d558d2415ab69c0a77b43c10a15b57eca9 100644 (file)
@@ -267,7 +267,6 @@ static const struct of_device_id realtek_mdio_of_match[] = {
 #endif
 #if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
        { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
-       { .compatible = "realtek,rtl8367s", .data = &rtl8365mb_variant, },
 #endif
        { /* sentinel */ },
 };
index 2243d3da55b29c002443f38e2f38b8111a3cb74e..45992f79ec8d48da869218970ee96c4bf9546be7 100644 (file)
@@ -546,20 +546,11 @@ static const struct of_device_id realtek_smi_of_match[] = {
                .data = &rtl8366rb_variant,
        },
 #endif
-       {
-               /* FIXME: add support for RTL8366S and more */
-               .compatible = "realtek,rtl8366s",
-               .data = NULL,
-       },
 #if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
        {
                .compatible = "realtek,rtl8365mb",
                .data = &rtl8365mb_variant,
        },
-       {
-               .compatible = "realtek,rtl8367s",
-               .data = &rtl8365mb_variant,
-       },
 #endif
        { /* sentinel */ },
 };
index bd4cb9d7c35d45eda668f9fb32c8770ab12f1923..827993022386c785dcc0bfbdc3b06bf4e27a69fa 100644 (file)
@@ -35,15 +35,6 @@ source "drivers/net/ethernet/aquantia/Kconfig"
 source "drivers/net/ethernet/arc/Kconfig"
 source "drivers/net/ethernet/asix/Kconfig"
 source "drivers/net/ethernet/atheros/Kconfig"
-source "drivers/net/ethernet/broadcom/Kconfig"
-source "drivers/net/ethernet/brocade/Kconfig"
-source "drivers/net/ethernet/cadence/Kconfig"
-source "drivers/net/ethernet/calxeda/Kconfig"
-source "drivers/net/ethernet/cavium/Kconfig"
-source "drivers/net/ethernet/chelsio/Kconfig"
-source "drivers/net/ethernet/cirrus/Kconfig"
-source "drivers/net/ethernet/cisco/Kconfig"
-source "drivers/net/ethernet/cortina/Kconfig"
 
 config CX_ECAT
        tristate "Beckhoff CX5020 EtherCAT master support"
@@ -57,6 +48,14 @@ config CX_ECAT
          To compile this driver as a module, choose M here. The module
          will be called ec_bhf.
 
+source "drivers/net/ethernet/broadcom/Kconfig"
+source "drivers/net/ethernet/cadence/Kconfig"
+source "drivers/net/ethernet/calxeda/Kconfig"
+source "drivers/net/ethernet/cavium/Kconfig"
+source "drivers/net/ethernet/chelsio/Kconfig"
+source "drivers/net/ethernet/cirrus/Kconfig"
+source "drivers/net/ethernet/cisco/Kconfig"
+source "drivers/net/ethernet/cortina/Kconfig"
 source "drivers/net/ethernet/davicom/Kconfig"
 
 config DNET
@@ -85,7 +84,6 @@ source "drivers/net/ethernet/huawei/Kconfig"
 source "drivers/net/ethernet/i825xx/Kconfig"
 source "drivers/net/ethernet/ibm/Kconfig"
 source "drivers/net/ethernet/intel/Kconfig"
-source "drivers/net/ethernet/microsoft/Kconfig"
 source "drivers/net/ethernet/xscale/Kconfig"
 
 config JME
@@ -128,8 +126,9 @@ source "drivers/net/ethernet/mediatek/Kconfig"
 source "drivers/net/ethernet/mellanox/Kconfig"
 source "drivers/net/ethernet/micrel/Kconfig"
 source "drivers/net/ethernet/microchip/Kconfig"
-source "drivers/net/ethernet/moxa/Kconfig"
 source "drivers/net/ethernet/mscc/Kconfig"
+source "drivers/net/ethernet/microsoft/Kconfig"
+source "drivers/net/ethernet/moxa/Kconfig"
 source "drivers/net/ethernet/myricom/Kconfig"
 
 config FEALNX
@@ -141,10 +140,10 @@ config FEALNX
          Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
          cards. <http://www.myson.com.tw/>
 
+source "drivers/net/ethernet/ni/Kconfig"
 source "drivers/net/ethernet/natsemi/Kconfig"
 source "drivers/net/ethernet/neterion/Kconfig"
 source "drivers/net/ethernet/netronome/Kconfig"
-source "drivers/net/ethernet/ni/Kconfig"
 source "drivers/net/ethernet/8390/Kconfig"
 source "drivers/net/ethernet/nvidia/Kconfig"
 source "drivers/net/ethernet/nxp/Kconfig"
@@ -164,6 +163,7 @@ source "drivers/net/ethernet/packetengines/Kconfig"
 source "drivers/net/ethernet/pasemi/Kconfig"
 source "drivers/net/ethernet/pensando/Kconfig"
 source "drivers/net/ethernet/qlogic/Kconfig"
+source "drivers/net/ethernet/brocade/Kconfig"
 source "drivers/net/ethernet/qualcomm/Kconfig"
 source "drivers/net/ethernet/rdc/Kconfig"
 source "drivers/net/ethernet/realtek/Kconfig"
@@ -171,10 +171,10 @@ source "drivers/net/ethernet/renesas/Kconfig"
 source "drivers/net/ethernet/rocker/Kconfig"
 source "drivers/net/ethernet/samsung/Kconfig"
 source "drivers/net/ethernet/seeq/Kconfig"
-source "drivers/net/ethernet/sfc/Kconfig"
 source "drivers/net/ethernet/sgi/Kconfig"
 source "drivers/net/ethernet/silan/Kconfig"
 source "drivers/net/ethernet/sis/Kconfig"
+source "drivers/net/ethernet/sfc/Kconfig"
 source "drivers/net/ethernet/smsc/Kconfig"
 source "drivers/net/ethernet/socionext/Kconfig"
 source "drivers/net/ethernet/stmicro/Kconfig"
index 33f1a1377588bda47db7a76c94421a3f84d3ac4f..24d715c28a355793e6fcb0b8c94877c887b2d1ca 100644 (file)
@@ -486,8 +486,8 @@ int aq_nic_start(struct aq_nic_s *self)
        if (err < 0)
                goto err_exit;
 
-       for (i = 0U, aq_vec = self->aq_vec[0];
-               self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+       for (i = 0U; self->aq_vecs > i; ++i) {
+               aq_vec = self->aq_vec[i];
                err = aq_vec_start(aq_vec);
                if (err < 0)
                        goto err_exit;
@@ -517,8 +517,8 @@ int aq_nic_start(struct aq_nic_s *self)
                mod_timer(&self->polling_timer, jiffies +
                          AQ_CFG_POLLING_TIMER_INTERVAL);
        } else {
-               for (i = 0U, aq_vec = self->aq_vec[0];
-                       self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+               for (i = 0U; self->aq_vecs > i; ++i) {
+                       aq_vec = self->aq_vec[i];
                        err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
                                                    aq_vec_isr, aq_vec,
                                                    aq_vec_get_affinity_mask(aq_vec));
index 797a95142d1f44dbc0ed454e2daf955ec8fc90ea..3a529ee8c834061666a55bb2bb1ff62853780876 100644 (file)
@@ -444,22 +444,22 @@ err_exit:
 
 static int aq_pm_freeze(struct device *dev)
 {
-       return aq_suspend_common(dev, false);
+       return aq_suspend_common(dev, true);
 }
 
 static int aq_pm_suspend_poweroff(struct device *dev)
 {
-       return aq_suspend_common(dev, true);
+       return aq_suspend_common(dev, false);
 }
 
 static int aq_pm_thaw(struct device *dev)
 {
-       return atl_resume_common(dev, false);
+       return atl_resume_common(dev, true);
 }
 
 static int aq_pm_resume_restore(struct device *dev)
 {
-       return atl_resume_common(dev, true);
+       return atl_resume_common(dev, false);
 }
 
 static const struct dev_pm_ops aq_pm_ops = {
index f4774cf051c9780cfc434450673bb82d175e2736..6ab1f3212d2463a53ac35fc41f1aa3706d376066 100644 (file)
@@ -43,8 +43,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
        if (!self) {
                err = -EINVAL;
        } else {
-               for (i = 0U, ring = self->ring[0];
-                       self->tx_rings > i; ++i, ring = self->ring[i]) {
+               for (i = 0U; self->tx_rings > i; ++i) {
+                       ring = self->ring[i];
                        u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
                        ring[AQ_VEC_RX_ID].stats.rx.polls++;
                        u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
@@ -182,8 +182,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
        self->aq_hw_ops = aq_hw_ops;
        self->aq_hw = aq_hw;
 
-       for (i = 0U, ring = self->ring[0];
-               self->tx_rings > i; ++i, ring = self->ring[i]) {
+       for (i = 0U; self->tx_rings > i; ++i) {
+               ring = self->ring[i];
                err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
                if (err < 0)
                        goto err_exit;
@@ -224,8 +224,8 @@ int aq_vec_start(struct aq_vec_s *self)
        unsigned int i = 0U;
        int err = 0;
 
-       for (i = 0U, ring = self->ring[0];
-               self->tx_rings > i; ++i, ring = self->ring[i]) {
+       for (i = 0U; self->tx_rings > i; ++i) {
+               ring = self->ring[i];
                err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
                                                        &ring[AQ_VEC_TX_ID]);
                if (err < 0)
@@ -248,8 +248,8 @@ void aq_vec_stop(struct aq_vec_s *self)
        struct aq_ring_s *ring = NULL;
        unsigned int i = 0U;
 
-       for (i = 0U, ring = self->ring[0];
-               self->tx_rings > i; ++i, ring = self->ring[i]) {
+       for (i = 0U; self->tx_rings > i; ++i) {
+               ring = self->ring[i];
                self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
                                                 &ring[AQ_VEC_TX_ID]);
 
@@ -268,8 +268,8 @@ void aq_vec_deinit(struct aq_vec_s *self)
        if (!self)
                goto err_exit;
 
-       for (i = 0U, ring = self->ring[0];
-               self->tx_rings > i; ++i, ring = self->ring[i]) {
+       for (i = 0U; self->tx_rings > i; ++i) {
+               ring = self->ring[i];
                aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
                aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
        }
@@ -297,8 +297,8 @@ void aq_vec_ring_free(struct aq_vec_s *self)
        if (!self)
                goto err_exit;
 
-       for (i = 0U, ring = self->ring[0];
-               self->tx_rings > i; ++i, ring = self->ring[i]) {
+       for (i = 0U; self->tx_rings > i; ++i) {
+               ring = self->ring[i];
                aq_ring_free(&ring[AQ_VEC_TX_ID]);
                if (i < self->rx_rings)
                        aq_ring_free(&ring[AQ_VEC_RX_ID]);
index 2dd79af9411bbc2042b043601d5a89dbbe4df68b..bf1ec8fdc2adc0a62e06393c41bca7e1afa70c54 100644 (file)
@@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset)
        if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                __raw_writel(value, offset);
        else
-               writel(value, offset);
+               writel_relaxed(value, offset);
 }
 
 static inline u32 bcmgenet_readl(void __iomem *offset)
@@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset)
        if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                return __raw_readl(offset);
        else
-               return readl(offset);
+               return readl_relaxed(offset);
 }
 
 static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
@@ -2035,6 +2035,11 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
        return skb;
 }
 
+static void bcmgenet_hide_tsb(struct sk_buff *skb)
+{
+       __skb_pull(skb, sizeof(struct status_64));
+}
+
 static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -2141,6 +2146,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        GENET_CB(skb)->last_cb = tx_cb_ptr;
+
+       bcmgenet_hide_tsb(skb);
        skb_tx_timestamp(skb);
 
        /* Decrement total BD count and advance our write pointer */
index 800d5ced580003c5eaff710ccf9ec8daf1dd68b1..e475be29845c669a0bf734ddc654790339bf96aa 100644 (file)
@@ -1658,6 +1658,7 @@ static void macb_tx_restart(struct macb_queue *queue)
        unsigned int head = queue->tx_head;
        unsigned int tail = queue->tx_tail;
        struct macb *bp = queue->bp;
+       unsigned int head_idx, tbqp;
 
        if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
                queue_writel(queue, ISR, MACB_BIT(TXUBR));
@@ -1665,6 +1666,13 @@ static void macb_tx_restart(struct macb_queue *queue)
        if (head == tail)
                return;
 
+       tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
+       tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
+       head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
+
+       if (tbqp == head_idx)
+               return;
+
        macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
 }
 
index d5356db7539a43773e9496c29dc707995aaa00d2..caf48023f8ea5419cf5aec5ddd096f11e8ae43b3 100644 (file)
@@ -1835,11 +1835,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
                priv->rxdes0_edorr_mask = BIT(30);
                priv->txdes0_edotr_mask = BIT(30);
                priv->is_aspeed = true;
-               /* Disable ast2600 problematic HW arbitration */
-               if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
-                       iowrite32(FTGMAC100_TM_DEFAULT,
-                                 priv->base + FTGMAC100_OFFSET_TM);
-               }
        } else {
                priv->rxdes0_edorr_mask = BIT(15);
                priv->txdes0_edotr_mask = BIT(15);
@@ -1911,6 +1906,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
                err = ftgmac100_setup_clk(priv);
                if (err)
                        goto err_phy_connect;
+
+               /* Disable ast2600 problematic HW arbitration */
+               if (of_device_is_compatible(np, "aspeed,ast2600-mac"))
+                       iowrite32(FTGMAC100_TM_DEFAULT,
+                                 priv->base + FTGMAC100_OFFSET_TM);
        }
 
        /* Default ring sizes */
index 763d2c7b5fb1a78225ad757e1aee7578ab310c36..5750f9a56393a038c3e50eb0502218f7696b33d0 100644 (file)
@@ -489,11 +489,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
        info->phc_index = -1;
 
        fman_node = of_get_parent(mac_node);
-       if (fman_node)
+       if (fman_node) {
                ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
+               of_node_put(fman_node);
+       }
 
-       if (ptp_node)
+       if (ptp_node) {
                ptp_dev = of_find_device_by_node(ptp_node);
+               of_node_put(ptp_node);
+       }
 
        if (ptp_dev)
                ptp = platform_get_drvdata(ptp_dev);
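
Both of_get_parent() and of_parse_phandle() return a device_node with an elevated refcount, which the old code leaked. A condensed sketch of the balanced pattern the hunk installs:

#include <linux/of.h>
#include <linux/of_platform.h>

/* Each successful node lookup must be paired with of_node_put() once
 * the node has served its purpose (here: deriving the next node).
 */
static struct platform_device *lookup_ptp_dev(struct device_node *mac_node)
{
	struct device_node *fman_node, *ptp_node = NULL;
	struct platform_device *ptp_dev = NULL;

	fman_node = of_get_parent(mac_node);
	if (fman_node) {
		ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
		of_node_put(fman_node);		/* drop parent ref */
	}

	if (ptp_node) {
		ptp_dev = of_find_device_by_node(ptp_node);
		of_node_put(ptp_node);		/* drop phandle ref */
	}

	return ptp_dev;
}
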
index 7edf8569514ccdc49c11073926ebeba315435e8e..928d934cb21a5af1a0fbdc144a12dbee5e4e1960 100644 (file)
@@ -1065,19 +1065,23 @@ int hns_mac_init(struct dsaf_device *dsaf_dev)
        device_for_each_child_node(dsaf_dev->dev, child) {
                ret = fwnode_property_read_u32(child, "reg", &port_id);
                if (ret) {
+                       fwnode_handle_put(child);
                        dev_err(dsaf_dev->dev,
                                "get reg fail, ret=%d!\n", ret);
                        return ret;
                }
                if (port_id >= max_port_num) {
+                       fwnode_handle_put(child);
                        dev_err(dsaf_dev->dev,
                                "reg(%u) out of range!\n", port_id);
                        return -EINVAL;
                }
                mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
                                      GFP_KERNEL);
-               if (!mac_cb)
+               if (!mac_cb) {
+                       fwnode_handle_put(child);
                        return -ENOMEM;
+               }
                mac_cb->fw_port = child;
                mac_cb->mac_id = (u8)port_id;
                dsaf_dev->mac_cb[port_id] = mac_cb;
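
device_for_each_child_node() holds a reference on the current child and releases it only when advancing to the next one, so every early return from the loop body must drop that reference itself, as the hunk does on all three error paths. A minimal sketch of the rule:

#include <linux/property.h>

static int walk_children(struct device *dev)
{
	struct fwnode_handle *child;
	u32 reg;
	int ret;

	device_for_each_child_node(dev, child) {
		ret = fwnode_property_read_u32(child, "reg", &reg);
		if (ret) {
			fwnode_handle_put(child);	/* balance the ref */
			return ret;
		}
	}
	return 0;
}
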
index 0c60f41fca8a6f25446131feaa47ca04e294c837..f3c9395d8351cb31108973e1867332e00026c6ac 100644 (file)
@@ -75,7 +75,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
                ret = hclge_comm_cmd_send(hw, &desc, 1);
                if (ret) {
                        dev_err(&hw->cmq.csq.pdev->dev,
-                               "failed to get tqp stat, ret = %d, tx = %u.\n",
+                               "failed to get tqp stat, ret = %d, rx = %u.\n",
                                ret, i);
                        return ret;
                }
@@ -89,7 +89,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
                ret = hclge_comm_cmd_send(hw, &desc, 1);
                if (ret) {
                        dev_err(&hw->cmq.csq.pdev->dev,
-                               "failed to get tqp stat, ret = %d, rx = %u.\n",
+                               "failed to get tqp stat, ret = %d, tx = %u.\n",
                                ret, i);
                        return ret;
                }
index 44d9b560b3374f8f2ad8d7b0b1596e1b4f7bce50..93aeb615191d9064aee2be7e1f21c0003fb75322 100644 (file)
@@ -562,12 +562,12 @@ static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf,
 
        for (i = 0; i < ring_num; i++) {
                j = 0;
-               sprintf(result[j++], "%8u", i);
-               sprintf(result[j++], "%9u", ring->tx_copybreak);
-               sprintf(result[j++], "%3u", tx_spare->len);
-               sprintf(result[j++], "%3u", tx_spare->next_to_use);
-               sprintf(result[j++], "%3u", tx_spare->next_to_clean);
-               sprintf(result[j++], "%3u", tx_spare->last_to_clean);
+               sprintf(result[j++], "%u", i);
+               sprintf(result[j++], "%u", ring->tx_copybreak);
+               sprintf(result[j++], "%u", tx_spare->len);
+               sprintf(result[j++], "%u", tx_spare->next_to_use);
+               sprintf(result[j++], "%u", tx_spare->next_to_clean);
+               sprintf(result[j++], "%u", tx_spare->last_to_clean);
                sprintf(result[j++], "%pad", &tx_spare->dma);
                hns3_dbg_fill_content(content, sizeof(content),
                                      tx_spare_info_items,
@@ -598,35 +598,35 @@ static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
        u32 base_add_l, base_add_h;
        u32 j = 0;
 
-       sprintf(result[j++], "%8u", index);
+       sprintf(result[j++], "%u", index);
 
-       sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+       sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
                HNS3_RING_RX_RING_BD_NUM_REG));
 
-       sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+       sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
                HNS3_RING_RX_RING_BD_LEN_REG));
 
-       sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+       sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
                HNS3_RING_RX_RING_TAIL_REG));
 
-       sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+       sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
                HNS3_RING_RX_RING_HEAD_REG));
 
-       sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+       sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
                HNS3_RING_RX_RING_FBDNUM_REG));
 
-       sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+       sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
                HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
-       sprintf(result[j++], "%9u", ring->rx_copybreak);
+       sprintf(result[j++], "%u", ring->rx_copybreak);
 
-       sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base +
+       sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
                HNS3_RING_EN_REG) ? "on" : "off");
 
        if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
-               sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base +
+               sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
                        HNS3_RING_RX_EN_REG) ? "on" : "off");
        else
-               sprintf(result[j++], "%10s", "NA");
+               sprintf(result[j++], "%s", "NA");
 
        base_add_h = readl_relaxed(ring->tqp->io_base +
                                        HNS3_RING_RX_RING_BASEADDR_H_REG);
@@ -700,36 +700,36 @@ static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
        u32 base_add_l, base_add_h;
        u32 j = 0;
 
-       sprintf(result[j++], "%8u", index);
-       sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+       sprintf(result[j++], "%u", index);
+       sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
                HNS3_RING_TX_RING_BD_NUM_REG));
 
-       sprintf(result[j++], "%2u", readl_relaxed(ring->tqp->io_base +
+       sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
                HNS3_RING_TX_RING_TC_REG));
 
-       sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+       sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
                HNS3_RING_TX_RING_TAIL_REG));
 
-       sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+       sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
                HNS3_RING_TX_RING_HEAD_REG));
 
-       sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+       sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
                HNS3_RING_TX_RING_FBDNUM_REG));
 
-       sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+       sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
                HNS3_RING_TX_RING_OFFSET_REG));
 
-       sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+       sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
                HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
 
-       sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base +
+       sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
                HNS3_RING_EN_REG) ? "on" : "off");
 
        if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
-               sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base +
+               sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
                        HNS3_RING_TX_EN_REG) ? "on" : "off");
        else
-               sprintf(result[j++], "%10s", "NA");
+               sprintf(result[j++], "%s", "NA");
 
        base_add_h = readl_relaxed(ring->tqp->io_base +
                                        HNS3_RING_TX_RING_BASEADDR_H_REG);
@@ -848,15 +848,15 @@ static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv,
 {
        unsigned int j = 0;
 
-       sprintf(result[j++], "%5d", idx);
+       sprintf(result[j++], "%d", idx);
        sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info));
-       sprintf(result[j++], "%7u", le16_to_cpu(desc->rx.pkt_len));
-       sprintf(result[j++], "%4u", le16_to_cpu(desc->rx.size));
+       sprintf(result[j++], "%u", le16_to_cpu(desc->rx.pkt_len));
+       sprintf(result[j++], "%u", le16_to_cpu(desc->rx.size));
        sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash));
-       sprintf(result[j++], "%5u", le16_to_cpu(desc->rx.fd_id));
-       sprintf(result[j++], "%8u", le16_to_cpu(desc->rx.vlan_tag));
-       sprintf(result[j++], "%15u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
-       sprintf(result[j++], "%11u", le16_to_cpu(desc->rx.ot_vlan_tag));
+       sprintf(result[j++], "%u", le16_to_cpu(desc->rx.fd_id));
+       sprintf(result[j++], "%u", le16_to_cpu(desc->rx.vlan_tag));
+       sprintf(result[j++], "%u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
+       sprintf(result[j++], "%u", le16_to_cpu(desc->rx.ot_vlan_tag));
        sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info));
        if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
                u32 ol_info = le32_to_cpu(desc->rx.ol_info);
@@ -930,19 +930,19 @@ static void hns3_dump_tx_bd_info(struct hns3_nic_priv *priv,
 {
        unsigned int j = 0;
 
-       sprintf(result[j++], "%6d", idx);
+       sprintf(result[j++], "%d", idx);
        sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr));
-       sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.vlan_tag));
-       sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.send_size));
+       sprintf(result[j++], "%u", le16_to_cpu(desc->tx.vlan_tag));
+       sprintf(result[j++], "%u", le16_to_cpu(desc->tx.send_size));
        sprintf(result[j++], "%#x",
                le32_to_cpu(desc->tx.type_cs_vlan_tso_len));
-       sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.outer_vlan_tag));
-       sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.tv));
-       sprintf(result[j++], "%10u",
+       sprintf(result[j++], "%u", le16_to_cpu(desc->tx.outer_vlan_tag));
+       sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv));
+       sprintf(result[j++], "%u",
                le32_to_cpu(desc->tx.ol_type_vlan_len_msec));
        sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs));
        sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri));
-       sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.mss_hw_csum));
+       sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum));
 }
 
 static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
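
The width specifiers were dropped because a conversion like "%15u" always emits at least 15 characters plus the terminator, which can overrun a fixed-size per-field result buffer; column padding is applied later when the debugfs line is assembled. A small user-space sketch of the hazard (buffer size hypothetical):

#include <stdio.h>

int main(void)
{
	char field[8];	/* hypothetical per-field result buffer */

	/* "%15u" would need 16 bytes (15 characters plus the NUL) and
	 * cannot fit an 8-byte field; plain "%u" only needs as many
	 * bytes as the value has digits.
	 */
	snprintf(field, sizeof(field), "%u", 42u);
	printf("'%s'\n", field);
	return 0;
}
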
index 14dc12c2155d5f2247b466b575b328eb175214f4..a3ee7875d6a7aa4b47c4feb7c7c668022361a582 100644 (file)
@@ -5203,6 +5203,13 @@ static void hns3_state_init(struct hnae3_handle *handle)
                set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
 }
 
+static void hns3_state_uninit(struct hnae3_handle *handle)
+{
+       struct hns3_nic_priv *priv  = handle->priv;
+
+       clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
+}
+
 static int hns3_client_init(struct hnae3_handle *handle)
 {
        struct pci_dev *pdev = handle->pdev;
@@ -5320,7 +5327,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
        return ret;
 
 out_reg_netdev_fail:
+       hns3_state_uninit(handle);
        hns3_dbg_uninit(handle);
+       hns3_client_stop(handle);
 out_client_start:
        hns3_free_rx_cpu_rmap(netdev);
        hns3_nic_uninit_irq(priv);
index 6799d16de34b9490f60fe67c94a2619e329e0ca4..7998ca617a92e8c25be2f778fa822729cdbcfecf 100644 (file)
@@ -94,6 +94,13 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
        enum hclge_comm_cmd_status status;
        struct hclge_desc desc;
 
+       if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) {
+               dev_err(&hdev->pdev->dev,
+                       "msg data length(=%u) exceeds maximum(=%u)\n",
+                       msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+               return -EMSGSIZE;
+       }
+
        resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
 
        hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
@@ -176,7 +183,7 @@ static int hclge_get_ring_chain_from_mbx(
        ring_num = req->msg.ring_num;
 
        if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
-               return -ENOMEM;
+               return -EINVAL;
 
        for (i = 0; i < ring_num; i++) {
                if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
@@ -587,9 +594,9 @@ static int hclge_set_vf_mtu(struct hclge_vport *vport,
        return hclge_set_vport_mtu(vport, mtu);
 }
 
-static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
-                                    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
-                                    struct hclge_respond_to_vf_msg *resp_msg)
+static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
+                                   struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+                                   struct hclge_respond_to_vf_msg *resp_msg)
 {
        struct hnae3_handle *handle = &vport->nic;
        struct hclge_dev *hdev = vport->back;
@@ -599,17 +606,18 @@ static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
        if (queue_id >= handle->kinfo.num_tqps) {
                dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
                        queue_id, mbx_req->mbx_src_vfid);
-               return;
+               return -EINVAL;
        }
 
        qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
        memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
        resp_msg->len = sizeof(qid_in_pf);
+       return 0;
 }
 
-static void hclge_get_rss_key(struct hclge_vport *vport,
-                             struct hclge_mbx_vf_to_pf_cmd *mbx_req,
-                             struct hclge_respond_to_vf_msg *resp_msg)
+static int hclge_get_rss_key(struct hclge_vport *vport,
+                            struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+                            struct hclge_respond_to_vf_msg *resp_msg)
 {
 #define HCLGE_RSS_MBX_RESP_LEN 8
        struct hclge_dev *hdev = vport->back;
@@ -627,13 +635,14 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
                dev_warn(&hdev->pdev->dev,
                         "failed to get the rss hash key, the index(%u) invalid !\n",
                         index);
-               return;
+               return -EINVAL;
        }
 
        memcpy(resp_msg->data,
               &rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
               HCLGE_RSS_MBX_RESP_LEN);
        resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
+       return 0;
 }
 
 static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
@@ -809,10 +818,10 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
                                        "VF fail(%d) to set mtu\n", ret);
                        break;
                case HCLGE_MBX_GET_QID_IN_PF:
-                       hclge_get_queue_id_in_pf(vport, req, &resp_msg);
+                       ret = hclge_get_queue_id_in_pf(vport, req, &resp_msg);
                        break;
                case HCLGE_MBX_GET_RSS_KEY:
-                       hclge_get_rss_key(vport, req, &resp_msg);
+                       ret = hclge_get_rss_key(vport, req, &resp_msg);
                        break;
                case HCLGE_MBX_GET_LINK_MODE:
                        hclge_get_link_mode(vport, req);
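
The new length check rejects a message before it is copied into the fixed-size mailbox descriptor, and the ring-count check now returns -EINVAL, since an out-of-range parameter is an invalid argument rather than an allocation failure. A minimal sketch of the bounds check (sizes hypothetical):

#include <errno.h>
#include <string.h>

#define MBX_MAX_MSG_SIZE 14	/* hypothetical payload size */

struct mbx_desc {
	unsigned char data[MBX_MAX_MSG_SIZE];
};

/* Validate the caller-supplied length against the fixed descriptor
 * payload before copying; without the check an oversized msg_len
 * writes past desc->data.
 */
static int fill_mbx(struct mbx_desc *desc, const void *msg, size_t msg_len)
{
	if (msg_len > MBX_MAX_MSG_SIZE)
		return -EMSGSIZE;
	memcpy(desc->data, msg, msg_len);
	return 0;
}
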
index d60e2016d03c6116062ca882b2d5142db26cebca..e6c8e6d5234f81d000ce63258a8b31df5d6d9d98 100644 (file)
@@ -1009,8 +1009,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
 {
        u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
            link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
-       u16 max_ltr_enc_d = 0;  /* maximum LTR decoded by platform */
-       u16 lat_enc_d = 0;      /* latency decoded */
+       u32 max_ltr_enc_d = 0;  /* maximum LTR decoded by platform */
+       u32 lat_enc_d = 0;      /* latency decoded */
        u16 lat_enc = 0;        /* latency encoded */
 
        if (link) {
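
Decoded LTR latencies are a 10-bit value scaled up by a multiplier, so they can far exceed 16 bits; keeping them in u16 silently truncated them before the comparison against the platform maximum. A user-space demonstration, assuming the usual PCIe-style LTR encoding (field layout hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical encoded latency: 10-bit value 1000, scale 2
	 * (multiply by 2^10).
	 */
	uint16_t lat_enc = (2u << 10) | 1000u;

	uint16_t dec16 = (uint16_t)((lat_enc & 0x3FF) << (5 * (lat_enc >> 10)));
	uint32_t dec32 = (uint32_t)(lat_enc & 0x3FF) << (5 * (lat_enc >> 10));

	/* 1000 << 10 = 1024000, far beyond a u16, so the 16-bit decode
	 * silently truncates and the later comparison goes wrong.
	 */
	printf("u16 decode: %u\nu32 decode: %u\n", dec16, dec32);
	return 0;
}
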
index 190590d32fafca1069c75533edf234d4de23bf97..7dfcf78b57fb54dbe67a69ecf1ac8be9acf8b339 100644 (file)
@@ -2871,7 +2871,6 @@ continue_reset:
        running = adapter->state == __IAVF_RUNNING;
 
        if (running) {
-               netdev->flags &= ~IFF_UP;
                netif_carrier_off(netdev);
                netif_tx_stop_all_queues(netdev);
                adapter->link_up = false;
@@ -2988,7 +2987,7 @@ continue_reset:
                 * to __IAVF_RUNNING
                 */
                iavf_up_complete(adapter);
-               netdev->flags |= IFF_UP;
+
                iavf_irq_enable(adapter, true);
        } else {
                iavf_change_state(adapter, __IAVF_DOWN);
@@ -3004,10 +3003,8 @@ continue_reset:
 reset_err:
        mutex_unlock(&adapter->client_lock);
        mutex_unlock(&adapter->crit_lock);
-       if (running) {
+       if (running)
                iavf_change_state(adapter, __IAVF_RUNNING);
-               netdev->flags |= IFF_UP;
-       }
        dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
        iavf_close(netdev);
 }
index 5daade32ea62585c1219568677f82d0cfda39d11..fba178e0760097f70acbcb5288a9d6be38fe4e7d 100644 (file)
@@ -577,7 +577,7 @@ void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
 {
        struct net_device *netdev;
 
-       if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list)
+       if (!vsi || vsi->type != ICE_VSI_PF)
                return;
 
        netdev = vsi->netdev;
@@ -599,7 +599,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
        int base_idx, i;
 
        if (!vsi || vsi->type != ICE_VSI_PF)
-               return -EINVAL;
+               return 0;
 
        pf = vsi->back;
        netdev = vsi->netdev;
@@ -636,7 +636,6 @@ void ice_remove_arfs(struct ice_pf *pf)
        if (!pf_vsi)
                return;
 
-       ice_free_cpu_rx_rmap(pf_vsi);
        ice_clear_arfs(pf_vsi);
 }
 
@@ -653,9 +652,5 @@ void ice_rebuild_arfs(struct ice_pf *pf)
                return;
 
        ice_remove_arfs(pf);
-       if (ice_set_cpu_rx_rmap(pf_vsi)) {
-               dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n");
-               return;
-       }
        ice_init_arfs(pf_vsi);
 }
index 9a84d746a6c4ed9786c372770a4e66189f96dd78..6a463b242c7df2c93eb57cddc8aec5b1d10049ad 100644 (file)
@@ -361,7 +361,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        np = netdev_priv(netdev);
        vsi = np->vsi;
 
-       if (ice_is_reset_in_progress(vsi->back->state))
+       if (ice_is_reset_in_progress(vsi->back->state) ||
+           test_bit(ICE_VF_DIS, vsi->back->state))
                return NETDEV_TX_BUSY;
 
        repr = ice_netdev_to_repr(netdev);
index bd58d9d2e56537993ea06e2df6bd72875d31bd2b..6a413331572b6a95e663d5820416e390e0c91539 100644 (file)
@@ -52,7 +52,7 @@ static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
 
 static inline int ice_eswitch_configure(struct ice_pf *pf)
 {
-       return -EOPNOTSUPP;
+       return 0;
 }
 
 static inline int ice_eswitch_rebuild(struct ice_pf *pf)
index 2774cbd5b12a2a760a9eb210a6e20c1ba01f8288..6d19c58ccacd46cf485529feb47ccf83c8129ef3 100644 (file)
@@ -2689,6 +2689,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
                return;
 
        vsi->irqs_ready = false;
+       ice_free_cpu_rx_rmap(vsi);
+
        ice_for_each_q_vector(vsi, i) {
                u16 vector = i + base;
                int irq_num;
@@ -2702,7 +2704,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
                        continue;
 
                /* clear the affinity notifier in the IRQ descriptor */
-               irq_set_affinity_notifier(irq_num, NULL);
+               if (!IS_ENABLED(CONFIG_RFS_ACCEL))
+                       irq_set_affinity_notifier(irq_num, NULL);
 
                /* clear the affinity_mask in the IRQ descriptor */
                irq_set_affinity_hint(irq_num, NULL);
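
Together with the ice_main.c hunks below, this ties the aRFS CPU rmap to the IRQs it maps: it is now created right after the vectors are requested and torn down right before they are freed, instead of living on the ice_setup_pf_sw()/suspend paths where the two lifetimes could get out of step. A sketch of the pairing (types and helpers hypothetical):

/* Hypothetical stand-ins for the driver's types and helpers. */
struct my_vsi;
int request_vsi_irqs(struct my_vsi *vsi);
void free_vsi_irqs(struct my_vsi *vsi);
int setup_cpu_rx_rmap(struct my_vsi *vsi);
void free_cpu_rx_rmap(struct my_vsi *vsi);

/* The rmap is created only once the IRQs exist, and unwound on
 * failure, so the two resources can never diverge across resets.
 */
int vsi_req_irqs(struct my_vsi *vsi)
{
	int err = request_vsi_irqs(vsi);

	if (err)
		return err;

	err = setup_cpu_rx_rmap(vsi);
	if (err) {
		free_vsi_irqs(vsi);	/* unwind on failure */
		return err;
	}
	return 0;
}

void vsi_free_irqs(struct my_vsi *vsi)
{
	free_cpu_rx_rmap(vsi);		/* reverse order of setup */
	free_vsi_irqs(vsi);
}
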
index d768925785ca79acf6e6c26c2f7ca8e5f80af0b1..9a0a358a15c254979d884cfd5512d07c3c68c021 100644 (file)
@@ -2510,6 +2510,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
                irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
        }
 
+       err = ice_set_cpu_rx_rmap(vsi);
+       if (err) {
+               netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
+                          vsi->vsi_num, ERR_PTR(err));
+               goto free_q_irqs;
+       }
+
        vsi->irqs_ready = true;
        return 0;
 
@@ -3692,20 +3699,12 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
         */
        ice_napi_add(vsi);
 
-       status = ice_set_cpu_rx_rmap(vsi);
-       if (status) {
-               dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
-                       vsi->vsi_num, status);
-               goto unroll_napi_add;
-       }
        status = ice_init_mac_fltr(pf);
        if (status)
-               goto free_cpu_rx_map;
+               goto unroll_napi_add;
 
        return 0;
 
-free_cpu_rx_map:
-       ice_free_cpu_rx_rmap(vsi);
 unroll_napi_add:
        ice_tc_indir_block_unregister(vsi);
 unroll_cfg_netdev:
@@ -5167,7 +5166,6 @@ static int __maybe_unused ice_suspend(struct device *dev)
                        continue;
                ice_vsi_free_q_vectors(pf->vsi[v]);
        }
-       ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
        ice_clear_interrupt_scheme(pf);
 
        pci_save_state(pdev);
@@ -6931,12 +6929,15 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
 
        dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
 
+#define ICE_EMP_RESET_SLEEP_MS 5000
        if (reset_type == ICE_RESET_EMPR) {
                /* If an EMP reset has occurred, any previously pending flash
                 * update will have completed. We no longer know whether or
                 * not the NVM update EMP reset is restricted.
                 */
                pf->fw_emp_reset_disabled = false;
+
+               msleep(ICE_EMP_RESET_SLEEP_MS);
        }
 
        err = ice_init_all_ctrlq(hw);
index 4eb0599714f4348d5e7b84314f5902d8a76543dd..13cdb5ea594d2f2e22650f397441ed7916e47952 100644 (file)
@@ -641,6 +641,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
        status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0,
                                       orom_data, hw->flash.banks.orom_size);
        if (status) {
+               vfree(orom_data);
                ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
                return status;
        }
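
The one-line vfree() plugs a leak: orom_data is allocated before the flash read, so the early-error return must free it too. The generic shape, in user-space terms (reader hypothetical):

#include <stdlib.h>

static int read_flash(unsigned char *buf, unsigned long size);	/* hypothetical */

static int read_orom_section(unsigned long size)
{
	unsigned char *buf = malloc(size);
	int err;

	if (!buf)
		return -1;

	err = read_flash(buf, size);
	if (err) {
		free(buf);	/* the exit the one-liner adds */
		return err;
	}

	/* ... parse buf ... */
	free(buf);
	return 0;
}
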
index 8915a9d39e36264bb5e70a9581eec357b6b11245..0c438219f7a39856623a00d116d5cdec351fbda2 100644 (file)
@@ -1046,8 +1046,8 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
 
        if (!num_vfs) {
                if (!pci_vfs_assigned(pdev)) {
-                       ice_mbx_deinit_snapshot(&pf->hw);
                        ice_free_vfs(pf);
+                       ice_mbx_deinit_snapshot(&pf->hw);
                        if (pf->lag)
                                ice_enable_lag(pf->lag);
                        return 0;
index 69ff4b9297725ae3b476e6a435b28832d9751681..b72606c9e6d03a6e494feaf435fe9ab9ea246f87 100644 (file)
@@ -3625,6 +3625,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
                return;
        }
 
+       mutex_lock(&vf->cfg_lock);
+
        /* Check if VF is disabled. */
        if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
                err = -EPERM;
@@ -3642,32 +3644,20 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
                        err = -EINVAL;
        }
 
-       if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
-               ice_vc_send_msg_to_vf(vf, v_opcode,
-                                     VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
-                                     0);
-               ice_put_vf(vf);
-               return;
-       }
-
 error_handler:
        if (err) {
                ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
                                      NULL, 0);
                dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
                        vf_id, v_opcode, msglen, err);
-               ice_put_vf(vf);
-               return;
+               goto finish;
        }
 
-       /* VF is being configured in another context that triggers a VFR, so no
-        * need to process this message
-        */
-       if (!mutex_trylock(&vf->cfg_lock)) {
-               dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n",
-                        vf->vf_id);
-               ice_put_vf(vf);
-               return;
+       if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
+               ice_vc_send_msg_to_vf(vf, v_opcode,
+                                     VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
+                                     0);
+               goto finish;
        }
 
        switch (v_opcode) {
@@ -3780,6 +3770,7 @@ error_handler:
                         vf_id, v_opcode, err);
        }
 
+finish:
        mutex_unlock(&vf->cfg_lock);
        ice_put_vf(vf);
 }
index 866ee4df9671cf82be5c92791ee993959620f0db..9dd38f66705946f507fd2db2d70af209abf86930 100644 (file)
@@ -415,8 +415,8 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
  */
 static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
 {
+       u32 nb_buffs_extra = 0, nb_buffs = 0;
        union ice_32b_rx_flex_desc *rx_desc;
-       u32 nb_buffs_extra = 0, nb_buffs;
        u16 ntu = rx_ring->next_to_use;
        u16 total_count = count;
        struct xdp_buff **xdp;
@@ -428,6 +428,10 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
                nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
                                                   rx_desc,
                                                   rx_ring->count - ntu);
+               if (nb_buffs_extra != rx_ring->count - ntu) {
+                       ntu += nb_buffs_extra;
+                       goto exit;
+               }
                rx_desc = ICE_RX_DESC(rx_ring, 0);
                xdp = ice_xdp_buf(rx_ring, 0);
                ntu = 0;
@@ -441,6 +445,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
        if (ntu == rx_ring->count)
                ntu = 0;
 
+exit:
        if (rx_ring->next_to_use != ntu)
                ice_release_rx_desc(rx_ring, ntu);
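
The fill proceeds in two legs: from next_to_use to the end of the ring, then from slot 0. If the first leg comes up short the XSK pool is out of buffers, and continuing at slot 0 would advance next_to_use past descriptors that were never filled; the new early exit stops at exactly the slot reached. A user-space sketch of the control flow:

#include <stdio.h>

/* Hypothetical pool that may hand out fewer buffers than asked for. */
static unsigned int pool_fill(unsigned int want, unsigned int avail)
{
	return want < avail ? want : avail;
}

static unsigned int fill_ring(unsigned int ntu, unsigned int count,
			      unsigned int ring_size, unsigned int avail)
{
	if (ntu + count > ring_size) {
		unsigned int leg1 = ring_size - ntu;
		unsigned int got = pool_fill(leg1, avail);

		ntu += got;
		if (got != leg1)
			return ntu;	/* early exit: pool exhausted */
		avail -= got;
		count -= got;
		ntu = 0;		/* wrap for the second leg */
	}
	ntu += pool_fill(count, avail);
	if (ntu == ring_size)
		ntu = 0;
	return ntu;
}

int main(void)
{
	printf("%u\n", fill_ring(6, 4, 8, 10));	/* wraps: ends at 2 */
	printf("%u\n", fill_ring(6, 4, 8, 1));	/* short: ends at 7 */
	return 0;
}
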
 
index 66ea566488d12bc7d5245e2d2dab959c9b3d3ab5..59d5c467ea6e356abecd381fe3420beac0521f5d 100644 (file)
@@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
 {
        u32 swfw_sync;
 
-       while (igc_get_hw_semaphore_i225(hw))
-               ; /* Empty */
+       /* Releasing the resource requires first getting the HW semaphore.
+        * If we fail to get the semaphore, there is nothing we can do,
+        * except log an error and quit. We are not allowed to hang here
+        * indefinitely, as it may cause denial of service or system crash.
+        */
+       if (igc_get_hw_semaphore_i225(hw)) {
+               hw_dbg("Failed to release SW_FW_SYNC.\n");
+               return;
+       }
 
        swfw_sync = rd32(IGC_SW_FW_SYNC);
        swfw_sync &= ~mask;
index 40dbf4b43234546634a3f8f04243fc651ea1f2e7..6961f65d36b9a41793b028b8e580507f31818cf2 100644 (file)
@@ -581,7 +581,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
         * the lower time out
         */
        for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
-               usleep_range(500, 1000);
+               udelay(50);
                mdic = rd32(IGC_MDIC);
                if (mdic & IGC_MDIC_READY)
                        break;
@@ -638,7 +638,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
         * the lower time out
         */
        for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
-               usleep_range(500, 1000);
+               udelay(50);
                mdic = rd32(IGC_MDIC);
                if (mdic & IGC_MDIC_READY)
                        break;
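
usleep_range() can schedule and so must not be used where this poll may run in atomic context, whereas udelay() busy-waits; the hunks only show the substitution, so the atomic-context motivation is inferred here. A sketch of the resulting loop shape (bound and bit hypothetical):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

static int poll_ready_atomic(void __iomem *reg)
{
	int i;

	/* udelay() never sleeps, so this is safe even with a spinlock
	 * held or IRQs off, at the cost of burning cycles; hence the
	 * short 50us spins.
	 */
	for (i = 0; i < 640; i++) {	/* hypothetical poll bound */
		udelay(50);
		if (readl(reg) & 0x1)	/* hypothetical READY bit */
			return 0;
	}
	return -ETIMEDOUT;
}
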
index 0d6e3215e98f57571e78d1a14541c0764c29c523..653e9f1e35b5c7023d26a78a021cab81a078319f 100644 (file)
@@ -992,6 +992,17 @@ static void igc_ptp_time_restore(struct igc_adapter *adapter)
        igc_ptp_write_i225(adapter, &ts);
 }
 
+static void igc_ptm_stop(struct igc_adapter *adapter)
+{
+       struct igc_hw *hw = &adapter->hw;
+       u32 ctrl;
+
+       ctrl = rd32(IGC_PTM_CTRL);
+       ctrl &= ~IGC_PTM_CTRL_EN;
+
+       wr32(IGC_PTM_CTRL, ctrl);
+}
+
 /**
  * igc_ptp_suspend - Disable PTP work items and prepare for suspend
  * @adapter: Board private structure
@@ -1009,8 +1020,10 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
        adapter->ptp_tx_skb = NULL;
        clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
 
-       if (pci_device_is_present(adapter->pdev))
+       if (pci_device_is_present(adapter->pdev)) {
                igc_ptp_time_save(adapter);
+               igc_ptm_stop(adapter);
+       }
 }
 
 /**
index 939b692ffc335ecad1265a002ae674926cf6a37a..ce843ea9146466d402a97f4e249aec548bcd06ed 100644 (file)
@@ -650,6 +650,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
        return 0;
 
 errout:
+       mutex_destroy(&mlxsw_i2c->cmd.lock);
        i2c_set_clientdata(client, NULL);
 
        return err;
index b73466470f75b15810b8f538ed4b79c818f2a3db..fe663b0ab7086a807f752ecbf98eb7b19bada5b6 100644 (file)
@@ -423,7 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
 
        parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
        ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
-                           0, 0, parms.link, tun->fwmark, 0);
+                           0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0);
 
        rt = ip_route_output_key(tun->net, &fl4);
        if (IS_ERR(rt))
index ce5970bdcc6a0791242a6663ae8b1e81f7897ad3..005e56ea5da1204c98e12778aa791b39b56b9dc9 100644 (file)
@@ -346,7 +346,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
 
                        lan966x_mac_process_raw_entry(&raw_entries[column],
                                                      mac, &vid, &dest_idx);
-                       WARN_ON(dest_idx > lan966x->num_phys_ports);
+                       if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
+                               continue;
 
                        /* If the entry in SW is found, then there is nothing
                         * to do
@@ -392,7 +393,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
 
                lan966x_mac_process_raw_entry(&raw_entries[column],
                                              mac, &vid, &dest_idx);
-               WARN_ON(dest_idx > lan966x->num_phys_ports);
+               if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
+                       continue;
 
                mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
                if (!mac_entry)
index 1f8c67f0261bf8583ef2bd3da425162188034de5..95830e3e2b1fbfd5e6392d70cf4de9fa594eb75a 100644 (file)
@@ -446,6 +446,12 @@ static bool lan966x_hw_offload(struct lan966x *lan966x, u32 port,
                     ANA_CPU_FWD_CFG_MLD_REDIR_ENA)))
                return true;
 
+       if (eth_type_vlan(skb->protocol)) {
+               skb = skb_vlan_untag(skb);
+               if (unlikely(!skb))
+                       return false;
+       }
+
        if (skb->protocol == htons(ETH_P_IP) &&
            ip_hdr(skb)->protocol == IPPROTO_IGMP)
                return false;
@@ -665,6 +671,9 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
                disable_irq(lan966x->ana_irq);
                lan966x->ana_irq = -ENXIO;
        }
+
+       if (lan966x->ptp_irq)
+               devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);
 }
 
 static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
index ae782778d6dd44739a3493f65c906522f372bc4a..0a1041da43842181a16b1df3f23d3be433eb500e 100644 (file)
@@ -29,10 +29,10 @@ enum {
 
 static u64 lan966x_ptp_get_nominal_value(void)
 {
-       u64 res = 0x304d2df1;
-
-       res <<= 32;
-       return res;
+       /* This is the default value by which the time of day is increased
+        * on each system clock cycle. It is a fixed-point number in 5.59
+        * nanosecond format.
+        */
+       return 0x304d4873ecade305;
 }
 
 int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr)
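
Taking the comment's 5.59 reading (5 integer bits of nanoseconds, 59 fractional bits), the new constant works out to about 6.0377 ns per tick, which matches a system clock of roughly 165.6 MHz, while the old value was both a few ppm off and had its low 32 fractional bits zeroed. A quick conversion, under that assumption:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t new_inc = 0x304d4873ecade305ULL;
	uint64_t old_inc = 0x304d2df1ULL << 32;

	/* Divide out the 59 fractional bits to get ns per tick. */
	printf("new: %.9f ns/tick\n", (double)new_inc / (1ULL << 59));
	printf("old: %.9f ns/tick\n", (double)old_inc / (1ULL << 59));
	return 0;
}
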
index e3555c94294dfe26eece08b8cc2b2205b3175ad7..df2bee6785598ac0fb5b2eade1a99d2223e9da79 100644 (file)
@@ -322,8 +322,7 @@ static int lan966x_port_prechangeupper(struct net_device *dev,
 
        if (netif_is_bridge_master(info->upper_dev) && !info->linking)
                switchdev_bridge_port_unoffload(port->dev, port,
-                                               &lan966x_switchdev_nb,
-                                               &lan966x_switchdev_blocking_nb);
+                                               NULL, NULL);
 
        return NOTIFY_DONE;
 }
index e443bd8b2d09acd2294d5f9a8735bdf63dd185a1..ca71b62a44dc3b84e8990aae13d355170726ea54 100644 (file)
@@ -551,7 +551,7 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
        struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
        struct ocelot_port *ocelot_port = ocelot->ports[port];
        struct ocelot_vcap_filter *filter;
-       int err;
+       int err = 0;
        u32 val;
 
        list_for_each_entry(filter, &block->rules, list) {
@@ -570,7 +570,7 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
        if (vlan_aware)
                err = ocelot_del_vlan_unaware_pvid(ocelot, port,
                                                   ocelot_port->bridge);
-       else
+       else if (ocelot_port->bridge)
                err = ocelot_add_vlan_unaware_pvid(ocelot, port,
                                                   ocelot_port->bridge);
        if (err)
@@ -629,6 +629,13 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
 {
        int err;
 
+       /* Ignore VID 0 added to our RX filter by the 8021q module, since
+        * that collides with OCELOT_STANDALONE_PVID and changes it from
+        * egress-untagged to egress-tagged.
+        */
+       if (!vid)
+               return 0;
+
        err = ocelot_vlan_member_add(ocelot, port, vid, untagged);
        if (err)
                return err;
@@ -651,6 +658,9 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
        bool del_pvid = false;
        int err;
 
+       if (!vid)
+               return 0;
+
        if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
                del_pvid = true;
 
@@ -2859,6 +2869,8 @@ static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
                val = BIT(port);
 
        ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC);
+       ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV4);
+       ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV6);
 }
 
 static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,
index cd478d2cd871ae46640bff2b0dd06eba384de777..00f6d347eaf75b33ec43c60aea8aca1fcaebdc7a 100644 (file)
 #define TSE_PCS_USE_SGMII_ENA                          BIT(0)
 #define TSE_PCS_IF_USE_SGMII                           0x03
 
-#define SGMII_ADAPTER_CTRL_REG                         0x00
-#define SGMII_ADAPTER_DISABLE                          0x0001
-#define SGMII_ADAPTER_ENABLE                           0x0000
-
 #define AUTONEGO_LINK_TIMER                            20
 
 static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs)
@@ -202,12 +198,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
                           unsigned int speed)
 {
        void __iomem *tse_pcs_base = pcs->tse_pcs_base;
-       void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
        u32 val;
 
-       writew(SGMII_ADAPTER_ENABLE,
-              sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
-
        pcs->autoneg = phy_dev->autoneg;
 
        if (phy_dev->autoneg == AUTONEG_ENABLE) {
index 442812c0a4bdccb6d93eb6e754b7800df8fcad7e..694ac25ef426ba2e6a9848b6881b9e3386d32ef6 100644 (file)
 #include <linux/phy.h>
 #include <linux/timer.h>
 
+#define SGMII_ADAPTER_CTRL_REG         0x00
+#define SGMII_ADAPTER_ENABLE           0x0000
+#define SGMII_ADAPTER_DISABLE          0x0001
+
 struct tse_pcs {
        struct device *dev;
        void __iomem *tse_pcs_base;
index b7c2579c963b68a72190a9b43f60dd852614858b..6b447d8f0bd8a3fb25dc3076d8ec5f455512f640 100644 (file)
@@ -18,9 +18,6 @@
 
 #include "altr_tse_pcs.h"
 
-#define SGMII_ADAPTER_CTRL_REG                          0x00
-#define SGMII_ADAPTER_DISABLE                           0x0001
-
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
@@ -62,14 +59,13 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
 {
        struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
        void __iomem *splitter_base = dwmac->splitter_base;
-       void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base;
        void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base;
        struct device *dev = dwmac->dev;
        struct net_device *ndev = dev_get_drvdata(dev);
        struct phy_device *phy_dev = ndev->phydev;
        u32 val;
 
-       if ((tse_pcs_base) && (sgmii_adapter_base))
+       if (sgmii_adapter_base)
                writew(SGMII_ADAPTER_DISABLE,
                       sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
 
@@ -93,8 +89,11 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
                writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
        }
 
-       if (tse_pcs_base && sgmii_adapter_base)
+       if (phy_dev && sgmii_adapter_base) {
+               writew(SGMII_ADAPTER_ENABLE,
+                      sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
                tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
+       }
 }
 
 static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
index 22fea0f67245c7f90d478c61cdaaaa63ac51ceb6..92d32940aff00660663e709fd8b7196500e96e16 100644 (file)
@@ -71,9 +71,9 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
        writel(value, ioaddr + PTP_TCR);
 
        /* wait for present system time initialize to complete */
-       return readl_poll_timeout(ioaddr + PTP_TCR, value,
+       return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
                                 !(value & PTP_TCR_TSINIT),
-                                10000, 100000);
+                                10, 100000);
 }
 
 static int config_addend(void __iomem *ioaddr, u32 addend)
index 16105292b140bbc44c97cd9370c4f6db163dea2d..74e845fa2e07ed8e2430c57c2d24b830be361be8 100644 (file)
@@ -1355,7 +1355,9 @@ static int rr_close(struct net_device *dev)
 
        rrpriv->fw_running = 0;
 
+       spin_unlock_irqrestore(&rrpriv->lock, flags);
        del_timer_sync(&rrpriv->timer);
+       spin_lock_irqsave(&rrpriv->lock, flags);
 
        writel(0, &regs->TxPi);
        writel(0, &regs->IpRxPi);
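
del_timer_sync() waits for a running timer callback to finish, so calling it while holding a lock the callback itself takes can deadlock; the hunk drops rrpriv->lock around the wait. The shape of the fix (types hypothetical):

#include <linux/spinlock.h>
#include <linux/timer.h>

struct demo_priv {		/* hypothetical stand-in for rr_private */
	spinlock_t lock;
	struct timer_list timer;
};

static void demo_close(struct demo_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	/* ... quiesce the device under the lock ... */

	spin_unlock_irqrestore(&priv->lock, flags);
	del_timer_sync(&priv->timer);	/* may wait for the callback */
	spin_lock_irqsave(&priv->lock, flags);

	/* ... finish teardown under the lock ... */
	spin_unlock_irqrestore(&priv->lock, flags);
}
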
index 069e8824c264adbb712ed47d24eafd3de43571fe..b00bc8173abea8478954adc5d632e10cb8da7aef 100644 (file)
@@ -460,8 +460,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
                        return RX_HANDLER_CONSUMED;
                *pskb = skb;
                eth = eth_hdr(skb);
-               if (macvlan_forward_source(skb, port, eth->h_source))
+               if (macvlan_forward_source(skb, port, eth->h_source)) {
+                       kfree_skb(skb);
                        return RX_HANDLER_CONSUMED;
+               }
                src = macvlan_hash_lookup(port, eth->h_source);
                if (src && src->mode != MACVLAN_MODE_VEPA &&
                    src->mode != MACVLAN_MODE_BRIDGE) {
@@ -480,8 +482,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
                return RX_HANDLER_PASS;
        }
 
-       if (macvlan_forward_source(skb, port, eth->h_source))
+       if (macvlan_forward_source(skb, port, eth->h_source)) {
+               kfree_skb(skb);
                return RX_HANDLER_CONSUMED;
+       }
        if (macvlan_passthru(port))
                vlan = list_first_or_null_rcu(&port->vlans,
                                              struct macvlan_dev, list);
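
RX_HANDLER_CONSUMED tells the stack that the handler took ownership of the skb; the source-forwarding path is done with the original skb once it returns true, so returning CONSUMED without freeing it leaked one skb per packet. A condensed sketch of the ownership rule (helper hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical stand-in for macvlan_forward_source(). */
static bool handled_by_source_forwarding(struct sk_buff *skb);

static rx_handler_result_t demo_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (handled_by_source_forwarding(skb)) {
		/* We now own the skb; nothing queued it, so free it
		 * or it leaks.
		 */
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}
	return RX_HANDLER_PASS;		/* stack keeps ownership */
}
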
index 1becb1a731f67555fc44e099bd90ee432086203c..1c1584fca63277a0d6b286e055a72678fa9677fd 100644 (file)
@@ -43,6 +43,11 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
        int rc;
 
        rc = fwnode_irq_get(child, 0);
+       /* Don't wait forever if the IRQ provider doesn't become available,
+        * just fall back to poll mode
+        */
+       if (rc == -EPROBE_DEFER)
+               rc = driver_deferred_probe_check_state(&phy->mdio.dev);
        if (rc == -EPROBE_DEFER)
                return rc;
 
index b6fea119fe137e978aebd20cef4bd3870294a2bb..2b7d0720720b6b2aa38d28139e6db013909beb99 100644 (file)
@@ -880,7 +880,7 @@ static int mv3310_read_status_copper(struct phy_device *phydev)
 
        cssr1 = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_CSSR1);
        if (cssr1 < 0)
-               return val;
+               return cssr1;
 
        /* If the link settings are not resolved, mark the link down */
        if (!(cssr1 & MV_PCS_CSSR1_RESOLVED)) {
index 389df3f4293c8bb0becdc30dc567e706dcbcc671..c2c0e361fd3d7af5eb379a08912df95caae2c27c 100644 (file)
@@ -706,7 +706,6 @@ static int lan87xx_read_status(struct phy_device *phydev)
 static int lan87xx_config_aneg(struct phy_device *phydev)
 {
        u16 ctl = 0;
-       int rc;
 
        switch (phydev->master_slave_set) {
        case MASTER_SLAVE_CFG_MASTER_FORCE:
@@ -722,11 +721,7 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
                return -EOPNOTSUPP;
        }
 
-       rc = phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
-       if (rc == 1)
-               rc = genphy_soft_reset(phydev);
-
-       return rc;
+       return phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
 }
 
 static struct phy_driver microchip_t1_phy_driver[] = {
@@ -748,6 +743,7 @@ static struct phy_driver microchip_t1_phy_driver[] = {
        {
                PHY_ID_MATCH_MODEL(PHY_ID_LAN937X),
                .name           = "Microchip LAN937x T1",
+               .flags          = PHY_POLL_CABLE_TEST,
                .features       = PHY_BASIC_T1_FEATURES,
                .config_init    = lan87xx_config_init,
                .suspend        = genphy_suspend,
index 276a0e42ca8eaa85a96b366ab56f5abda3ff7a27..dbe4c0a4be2cd626824604e75848b4f9f2859a49 100644 (file)
@@ -1124,7 +1124,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* NETIF_F_LLTX requires to do our own update of trans_start */
        queue = netdev_get_tx_queue(dev, txq);
-       queue->trans_start = jiffies;
+       txq_trans_cond_update(queue);
 
        /* Notify and wake up reader process */
        if (tfile->flags & TUN_FASYNC)
index 1b5714926d816dbf83c59745f0d7bca9935b229c..eb0121a64d6d2ecbc9b868428e9a042094b10013 100644 (file)
@@ -320,7 +320,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
 
        rcu_read_lock();
        rcv = rcu_dereference(priv->peer);
-       if (unlikely(!rcv)) {
+       if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
                kfree_skb(skb);
                goto drop;
        }
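
The peer's receive path immediately treats the buffer as an Ethernet frame, but an injected packet (e.g. from a raw socket) can be shorter than ETH_HLEN; pskb_may_pull() verifies that a full header is present and linear before anything reads it. The guard, in isolation:

#include <linux/if_ether.h>
#include <linux/skbuff.h>

static int demo_xmit_check(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, ETH_HLEN)) {
		kfree_skb(skb);
		return -EINVAL;	/* runt frame: dropped */
	}
	/* safe to read eth_hdr(skb) now */
	return 0;
}
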
index 11f26b00a2262bc7e263d1b884d38b4899135204..cbba9d2e8f322155f1add23d85564c14fecfe5b2 100644 (file)
@@ -169,6 +169,24 @@ struct receive_queue {
        struct xdp_rxq_info xdp_rxq;
 };
 
+/* This structure holds an RSS message sized for the maximum indirection
+ * table and key size. Note that the default structure describing the RSS
+ * configuration, virtio_net_rss_config, carries the same information but
+ * cannot hold the table values themselves.
+ * Either way, the structure is passed to the virtio hw through sg_buf,
+ * split into parts, because the table sizes may differ according to the
+ * device configuration.
+ */
+#define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
+#define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
+struct virtio_net_ctrl_rss {
+       u32 hash_types;
+       u16 indirection_table_mask;
+       u16 unclassified_queue;
+       u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
+       u16 max_tx_vq;
+       u8 hash_key_length;
+       u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
+};
+
 /* Control VQ buffers: protected by the rtnl lock */
 struct control_buf {
        struct virtio_net_ctrl_hdr hdr;
@@ -178,6 +196,7 @@ struct control_buf {
        u8 allmulti;
        __virtio16 vid;
        __virtio64 offloads;
+       struct virtio_net_ctrl_rss rss;
 };
 
 struct virtnet_info {
@@ -206,6 +225,14 @@ struct virtnet_info {
        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;
 
+       /* Host supports rss and/or hash report */
+       bool has_rss;
+       bool has_rss_hash_report;
+       u8 rss_key_size;
+       u16 rss_indir_table_size;
+       u32 rss_hash_types_supported;
+       u32 rss_hash_types_saved;
+
        /* Has control virtqueue */
        bool has_cvq;
 
@@ -242,13 +269,13 @@ struct virtnet_info {
 };
 
 struct padded_vnet_hdr {
-       struct virtio_net_hdr_mrg_rxbuf hdr;
+       struct virtio_net_hdr_v1_hash hdr;
        /*
         * hdr is in a separate sg buffer, and data sg buffer shares same page
         * with this header sg. This padding makes next sg 16 byte aligned
         * after the header.
         */
-       char padding[4];
+       char padding[12];
 };
 
 static bool is_xdp_frame(void *ptr)
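
Assuming struct virtio_net_hdr_v1_hash is 20 bytes (the 12-byte v1 header plus hash_value, hash_report and a reserved field), 12 bytes of tail padding round the header sg up to 32, keeping the following data sg 16-byte aligned, just as 4 bytes did for the old 12-byte mrg_rxbuf header. A compile-time check of that arithmetic, under the stated size assumption:

#include <assert.h>
#include <stdint.h>

struct padded_hdr_demo {
	uint8_t hdr[20];	/* assumed sizeof(virtio_net_hdr_v1_hash) */
	char padding[12];
};

static_assert(sizeof(struct padded_hdr_demo) % 16 == 0,
	      "next sg would be misaligned");
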
@@ -396,7 +423,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 
        hdr_len = vi->hdr_len;
        if (vi->mergeable_rx_bufs)
-               hdr_padded_len = sizeof(*hdr);
+               hdr_padded_len = hdr_len;
        else
                hdr_padded_len = sizeof(struct padded_vnet_hdr);
 
@@ -978,6 +1005,24 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                         * xdp.data_meta were adjusted
                         */
                        len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
+
+                       /* Recalculate the headroom if xdp.data or
+                        * xdp.data_meta were adjusted by the xdp prog.
+                        *
+                        * offset always points to the start of the bytes
+                        * reserved for the virtio_net header, which are
+                        * followed by xdp.data, so offset equals the headroom
+                        * (when buf starts at the beginning of the page;
+                        * otherwise there is a base offset inside the page),
+                        * but it is measured from a different starting point
+                        * (buf start) than xdp.data (buf start + vnet hdr
+                        * size). If the xdp prog adjusted xdp.data or
+                        * data_meta, the headroom, and with it the offset,
+                        * has changed; data_hard_start, which points at buf
+                        * start + vnet hdr size, lets us compute the new
+                        * headroom, used later to derive buf start in
+                        * page_to_skb().
+                        */
+                       headroom = xdp.data - xdp.data_hard_start - metasize;
+
                        /* We can only create skb based on xdp_page. */
                        if (unlikely(xdp_page != page)) {
                                rcu_read_unlock();
@@ -985,7 +1030,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                head_skb = page_to_skb(vi, rq, xdp_page, offset,
                                                       len, PAGE_SIZE, false,
                                                       metasize,
-                                                      VIRTIO_XDP_HEADROOM);
+                                                      headroom);
                                return head_skb;
                        }
                        break;
@@ -1123,6 +1168,35 @@ xdp_xmit:
        return NULL;
 }
 
+static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
+                               struct sk_buff *skb)
+{
+       enum pkt_hash_types rss_hash_type;
+
+       if (!hdr_hash || !skb)
+               return;
+
+       switch ((int)hdr_hash->hash_report) {
+       case VIRTIO_NET_HASH_REPORT_TCPv4:
+       case VIRTIO_NET_HASH_REPORT_UDPv4:
+       case VIRTIO_NET_HASH_REPORT_TCPv6:
+       case VIRTIO_NET_HASH_REPORT_UDPv6:
+       case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
+       case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
+               rss_hash_type = PKT_HASH_TYPE_L4;
+               break;
+       case VIRTIO_NET_HASH_REPORT_IPv4:
+       case VIRTIO_NET_HASH_REPORT_IPv6:
+       case VIRTIO_NET_HASH_REPORT_IPv6_EX:
+               rss_hash_type = PKT_HASH_TYPE_L3;
+               break;
+       case VIRTIO_NET_HASH_REPORT_NONE:
+       default:
+               rss_hash_type = PKT_HASH_TYPE_NONE;
+       }
+       skb_set_hash(skb, (unsigned int)hdr_hash->hash_value, rss_hash_type);
+}
+
 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                        void *buf, unsigned int len, void **ctx,
                        unsigned int *xdp_xmit,
@@ -1157,6 +1231,8 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                return;
 
        hdr = skb_vnet_hdr(skb);
+       if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
+               virtio_skb_set_hash((const struct virtio_net_hdr_v1_hash *)hdr, skb);
 
        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1266,7 +1342,8 @@ static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
                                          struct ewma_pkt_len *avg_pkt_len,
                                          unsigned int room)
 {
-       const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+       struct virtnet_info *vi = rq->vq->vdev->priv;
+       const size_t hdr_len = vi->hdr_len;
        unsigned int len;
 
        if (room)
@@ -2183,6 +2260,174 @@ static void virtnet_get_ringparam(struct net_device *dev,
        ring->tx_pending = ring->tx_max_pending;
 }
 
+static bool virtnet_commit_rss_command(struct virtnet_info *vi)
+{
+       struct net_device *dev = vi->dev;
+       struct scatterlist sgs[4];
+       unsigned int sg_buf_size;
+
+       /* prepare sgs */
+       sg_init_table(sgs, 4);
+
+       sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
+       sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
+
+       sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
+       sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
+
+       sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
+                       - offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
+       sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
+
+       sg_buf_size = vi->rss_key_size;
+       sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
+
+       if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
+                                 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
+                                 : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
+               dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
+               return false;
+       }
+       return true;
+}
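
The four scatterlist segments track the field order of the RSS control structure, so the device receives header, indirection table, queue/key-length fields, and key as one contiguous command. A hedged sketch of the layout this slicing assumes (field names are taken from the offsetof() calls above; array bounds are illustrative, not verified against the driver header):

	struct virtio_net_ctrl_rss {		/* assumed layout */
		u32 hash_types;
		u16 indirection_table_mask;
		u16 unclassified_queue;		/* sgs[0] ends before the table */
		u16 indirection_table[128];	/* sgs[1]: mask + 1 live entries */
		u16 max_tx_vq;			/* sgs[2] starts here ... */
		u8  hash_key_length;		/* ... and ends before key[] */
		u8  key[40];			/* sgs[3]: rss_key_size bytes */
	};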
+
+static void virtnet_init_default_rss(struct virtnet_info *vi)
+{
+       u32 indir_val = 0;
+       int i = 0;
+
+       vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
+       vi->rss_hash_types_saved = vi->rss_hash_types_supported;
+       vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
+                                               ? vi->rss_indir_table_size - 1 : 0;
+       vi->ctrl->rss.unclassified_queue = 0;
+
+       for (; i < vi->rss_indir_table_size; ++i) {
+               indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
+               vi->ctrl->rss.indirection_table[i] = indir_val;
+       }
+
+       vi->ctrl->rss.max_tx_vq = vi->curr_queue_pairs;
+       vi->ctrl->rss.hash_key_length = vi->rss_key_size;
+
+       netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
+}
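
ethtool_rxfh_indir_default() spreads the table round-robin over the active queue pairs; assuming the usual modulo helper from include/linux/ethtool.h, the default for 4 queue pairs and an 8-entry table is 0 1 2 3 0 1 2 3:

	/* hedged sketch of the helper's assumed behaviour */
	static inline u32 rxfh_indir_default_sketch(u32 index, u32 n_rx_rings)
	{
		return index % n_rx_rings;
	}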
+
+static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
+{
+       info->data = 0;
+       switch (info->flow_type) {
+       case TCP_V4_FLOW:
+               if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST |
+                                                RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST;
+               }
+               break;
+       case TCP_V6_FLOW:
+               if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST |
+                                                RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST;
+               }
+               break;
+       case UDP_V4_FLOW:
+               if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST |
+                                                RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST;
+               }
+               break;
+       case UDP_V6_FLOW:
+               if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST |
+                                                RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
+                       info->data = RXH_IP_SRC | RXH_IP_DST;
+               }
+               break;
+       case IPV4_FLOW:
+               if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
+                       info->data = RXH_IP_SRC | RXH_IP_DST;
+
+               break;
+       case IPV6_FLOW:
+               if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
+                       info->data = RXH_IP_SRC | RXH_IP_DST;
+
+               break;
+       default:
+               info->data = 0;
+               break;
+       }
+}
+
+static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
+{
+       u32 new_hashtypes = vi->rss_hash_types_saved;
+       bool is_disable = info->data & RXH_DISCARD;
+       bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
+
+       /* supports only 'sd', 'sdfn' and 'r' */
+       if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
+               return false;
+
+       switch (info->flow_type) {
+       case TCP_V4_FLOW:
+               new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
+               if (!is_disable)
+                       new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
+                               | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
+               break;
+       case UDP_V4_FLOW:
+               new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
+               if (!is_disable)
+                       new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
+                               | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
+               break;
+       case IPV4_FLOW:
+               new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
+               if (!is_disable)
+                       new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
+               break;
+       case TCP_V6_FLOW:
+               new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
+               if (!is_disable)
+                       new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
+                               | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
+               break;
+       case UDP_V6_FLOW:
+               new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
+               if (!is_disable)
+                       new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
+                               | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
+               break;
+       case IPV6_FLOW:
+               new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
+               if (!is_disable)
+                       new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
+               break;
+       default:
+               /* unsupported flow */
+               return false;
+       }
+
+       /* reject if an unsupported hash type was requested */
+       if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
+               return false;
+
+       if (new_hashtypes != vi->rss_hash_types_saved) {
+               vi->rss_hash_types_saved = new_hashtypes;
+               vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+               if (vi->dev->features & NETIF_F_RXHASH)
+                       return virtnet_commit_rss_command(vi);
+       }
+
+       return true;
+}
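
The 'sd'/'sdfn'/'r' shorthand in the comment above matches ethtool's rx-flow-hash field letters (s = IP source, d = IP destination, f/n = the two L4 port half-words, r = discard). Assuming stock ethtool syntax, the three accepted shapes are reached with commands like:

	ethtool -N eth0 rx-flow-hash tcp4 sdfn	# L3+L4 hashing ('sdfn')
	ethtool -N eth0 rx-flow-hash udp4 sd	# L3-only hashing ('sd')
	ethtool -N eth0 rx-flow-hash tcp6 r	# disable this flow type ('r')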
 
 static void virtnet_get_drvinfo(struct net_device *dev,
                                struct ethtool_drvinfo *info)
@@ -2411,6 +2656,92 @@ static void virtnet_update_settings(struct virtnet_info *vi)
                vi->duplex = duplex;
 }
 
+static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
+{
+       return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
+}
+
+static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
+{
+       return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
+}
+
+static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       int i;
+
+       if (indir) {
+               for (i = 0; i < vi->rss_indir_table_size; ++i)
+                       indir[i] = vi->ctrl->rss.indirection_table[i];
+       }
+
+       if (key)
+               memcpy(key, vi->ctrl->rss.key, vi->rss_key_size);
+
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_TOP;
+
+       return 0;
+}
+
+static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       int i;
+
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+               return -EOPNOTSUPP;
+
+       if (indir) {
+               for (i = 0; i < vi->rss_indir_table_size; ++i)
+                       vi->ctrl->rss.indirection_table[i] = indir[i];
+       }
+       if (key)
+               memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
+
+       virtnet_commit_rss_command(vi);
+
+       return 0;
+}
+
+static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       int rc = 0;
+
+       switch (info->cmd) {
+       case ETHTOOL_GRXRINGS:
+               info->data = vi->curr_queue_pairs;
+               break;
+       case ETHTOOL_GRXFH:
+               virtnet_get_hashflow(vi, info);
+               break;
+       default:
+               rc = -EOPNOTSUPP;
+       }
+
+       return rc;
+}
+
+static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       int rc = 0;
+
+       switch (info->cmd) {
+       case ETHTOOL_SRXFH:
+               if (!virtnet_set_hashflow(vi, info))
+                       rc = -EINVAL;
+
+               break;
+       default:
+               rc = -EOPNOTSUPP;
+       }
+
+       return rc;
+}
+
 static const struct ethtool_ops virtnet_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
        .get_drvinfo = virtnet_get_drvinfo,
@@ -2426,6 +2757,12 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
        .set_link_ksettings = virtnet_set_link_ksettings,
        .set_coalesce = virtnet_set_coalesce,
        .get_coalesce = virtnet_get_coalesce,
+       .get_rxfh_key_size = virtnet_get_rxfh_key_size,
+       .get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
+       .get_rxfh = virtnet_get_rxfh,
+       .set_rxfh = virtnet_set_rxfh,
+       .get_rxnfc = virtnet_get_rxnfc,
+       .set_rxnfc = virtnet_set_rxnfc,
 };
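
With these six ops wired up, the RSS state becomes reachable through standard tooling; assuming stock ethtool, for example:

	ethtool -x eth0			# dump indirection table, key, hash function
	ethtool -X eth0 equal 2		# spread the table evenly over 2 queues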
 
 static void virtnet_freeze_down(struct virtio_device *vdev)
@@ -2678,6 +3015,16 @@ static int virtnet_set_features(struct net_device *dev,
                vi->guest_offloads = offloads;
        }
 
+       if ((dev->features ^ features) & NETIF_F_RXHASH) {
+               if (features & NETIF_F_RXHASH)
+                       vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+               else
+                       vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
+
+               if (!virtnet_commit_rss_command(vi))
+                       return -EINVAL;
+       }
+
        return 0;
 }
 
@@ -2851,7 +3198,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
  */
 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
 {
-       const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+       const unsigned int hdr_len = vi->hdr_len;
        unsigned int rq_size = virtqueue_get_vring_size(vq);
        unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
        unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
@@ -3072,6 +3419,10 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
                             "VIRTIO_NET_F_CTRL_VQ") ||
             VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
             VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
+                            "VIRTIO_NET_F_CTRL_VQ") ||
+            VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
+                            "VIRTIO_NET_F_CTRL_VQ") ||
+            VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
                             "VIRTIO_NET_F_CTRL_VQ"))) {
                return false;
        }
@@ -3112,13 +3463,14 @@ static int virtnet_probe(struct virtio_device *vdev)
        u16 max_queue_pairs;
        int mtu;
 
-       /* Find if host supports multiqueue virtio_net device */
-       err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
-                                  struct virtio_net_config,
-                                  max_virtqueue_pairs, &max_queue_pairs);
+       /* Find if host supports multiqueue/rss virtio_net device */
+       max_queue_pairs = 1;
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
+               max_queue_pairs =
+                    virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
 
        /* We need at least 2 queues */
-       if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
+       if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
            max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
            !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
                max_queue_pairs = 1;
@@ -3206,8 +3558,33 @@ static int virtnet_probe(struct virtio_device *vdev)
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
                vi->mergeable_rx_bufs = true;
 
-       if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
-           virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
+               vi->has_rss_hash_report = true;
+
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
+               vi->has_rss = true;
+
+       if (vi->has_rss || vi->has_rss_hash_report) {
+               vi->rss_indir_table_size =
+                       virtio_cread16(vdev, offsetof(struct virtio_net_config,
+                               rss_max_indirection_table_length));
+               vi->rss_key_size =
+                       virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
+
+               vi->rss_hash_types_supported =
+                   virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
+               vi->rss_hash_types_supported &=
+                               ~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
+                                 VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
+                                 VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
+
+               dev->hw_features |= NETIF_F_RXHASH;
+       }
+
+       if (vi->has_rss_hash_report)
+               vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
+       else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
+                virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
                vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        else
                vi->hdr_len = sizeof(struct virtio_net_hdr);
@@ -3274,6 +3651,9 @@ static int virtnet_probe(struct virtio_device *vdev)
                }
        }
 
+       if (vi->has_rss || vi->has_rss_hash_report)
+               virtnet_init_default_rss(vi);
+
        err = register_netdev(dev);
        if (err) {
                pr_debug("virtio_net: registering device failed\n");
@@ -3405,7 +3785,8 @@ static struct virtio_device_id id_table[] = {
        VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
        VIRTIO_NET_F_CTRL_MAC_ADDR, \
        VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
-       VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY
+       VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
+       VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT
 
 static unsigned int features[] = {
        VIRTNET_FEATURES,
index de97ff98d36e949194b741f5e88b23255a07d43c..8a5e3a6d32d7ce0dec79e3061f6d356d014f47cf 100644 (file)
@@ -651,11 +651,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
 
        rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
        if (rd == NULL)
-               return -ENOBUFS;
+               return -ENOMEM;
 
        if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
                kfree(rd);
-               return -ENOBUFS;
+               return -ENOMEM;
        }
 
        rd->remote_ip = *ip;
index 23d2954d97475a551192fee9085a11281896a672..1e5672019922fc2b2d192427ea033123b6f58b9a 100644 (file)
@@ -349,7 +349,7 @@ static int __init cosa_init(void)
                }
        } else {
                cosa_major = register_chrdev(0, "cosa", &cosa_fops);
-               if (!cosa_major) {
+               if (cosa_major < 0) {
                        pr_warn("unable to register chardev\n");
                        err = -EIO;
                        goto out;
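
For context on this fix: when passed a major of 0, register_chrdev() returns the dynamically allocated major (a positive number) on success and a negative errno on failure; it never returns 0 on this path, so the old !cosa_major test could not fire. A minimal sketch of the corrected pattern:

	int major = register_chrdev(0, "cosa", &cosa_fops);

	if (major < 0)		/* negative errno: registration failed */
		return major;
	/* major now holds the dynamically assigned char-device major */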
index 0fad1331303c0240c497e21a420c9340ab4153fb..aa9a7a5970fda6fbf51b9748f4386b6ce1542a56 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/if_arp.h>
 #include <linux/icmp.h>
 #include <linux/suspend.h>
+#include <net/dst_metadata.h>
 #include <net/icmp.h>
 #include <net/rtnetlink.h>
 #include <net/ip_tunnels.h>
@@ -167,7 +168,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
                goto err_peer;
        }
 
-       mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
+       mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
 
        __skb_queue_head_init(&packets);
        if (!skb_is_gso(skb)) {
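
skb_valid_dst() differs from a bare skb_dst() check by also rejecting metadata dsts, which carry tunnel metadata but no route, so calling dst_mtu() on them reads garbage (the crash this fixes). A sketch of the helper as assumed from net/dst_metadata.h, which the new include above pulls in:

	static inline bool skb_valid_dst_sketch(const struct sk_buff *skb)
	{
		struct dst_entry *dst = skb_dst(skb);

		return dst && !(dst->flags & DST_METADATA);
	}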
index 63e1c2d783c5fe29ca030ac1f2f497d658b5dac9..73693c66cef12182a4cadcb533a3cfcb6b8f5d9c 100644 (file)
@@ -1633,7 +1633,7 @@ static void ath10k_sdio_hif_power_down(struct ath10k *ar)
                return;
        }
 
-       ret = mmc_hw_reset(ar_sdio->func->card->host);
+       ret = mmc_hw_reset(ar_sdio->func->card);
        if (ret)
                ath10k_warn(ar, "unable to reset sdio: %d\n", ret);
 
index d5b83f90d27a1c1751f43855658b7c0d364de994..e6b34b0d61bd3919532419f9cf2ab122197fac94 100644 (file)
@@ -3136,6 +3136,20 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
                        arvif->do_not_send_tmpl = true;
                else
                        arvif->do_not_send_tmpl = false;
+
+               if (vif->bss_conf.he_support) {
+                       ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+                                                           WMI_VDEV_PARAM_BA_MODE,
+                                                           WMI_BA_MODE_BUFFER_SIZE_256);
+                       if (ret)
+                               ath11k_warn(ar->ab,
+                                           "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
+                                           arvif->vdev_id);
+                       else
+                               ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+                                          "Set BA BUFFER SIZE 256 for VDEV: %d\n",
+                                          arvif->vdev_id);
+               }
        }
 
        if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
@@ -3171,14 +3185,6 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
 
                if (arvif->is_up && vif->bss_conf.he_support &&
                    vif->bss_conf.he_oper.params) {
-                       ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
-                                                           WMI_VDEV_PARAM_BA_MODE,
-                                                           WMI_BA_MODE_BUFFER_SIZE_256);
-                       if (ret)
-                               ath11k_warn(ar->ab,
-                                           "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
-                                           arvif->vdev_id);
-
                        param_id = WMI_VDEV_PARAM_HEOPS_0_31;
                        param_value = vif->bss_conf.he_oper.params;
                        ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
index 98090e40e1cf48a264755999bd5d29966cdfc4b9..e2791d45f5f595b3c0d218a9a4b060e6077e438a 100644 (file)
@@ -839,7 +839,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
                        continue;
 
                txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
-               fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
+               fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0];
                if (fi->keyix == keyix)
                        return true;
        }
index d0caf1de2bdec5f57c20f58c69962069fa955937..db83cc4ba810af9a04eb0009ce5992c515fb7775 100644 (file)
@@ -141,8 +141,8 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
 {
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        BUILD_BUG_ON(sizeof(struct ath_frame_info) >
-                    sizeof(tx_info->rate_driver_data));
-       return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
+                    sizeof(tx_info->status.status_driver_data));
+       return (struct ath_frame_info *) &tx_info->status.status_driver_data[0];
 }
 
 static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
@@ -2542,6 +2542,16 @@ skip_tx_complete:
        spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
 }
 
+static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info)
+{
+       void *ptr = &tx_info->status;
+
+       memset(ptr + sizeof(tx_info->status.rates), 0,
+              sizeof(tx_info->status) -
+              sizeof(tx_info->status.rates) -
+              sizeof(tx_info->status.status_driver_data));
+}
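
A hedged reading of the memset bounds above, assuming rates[] is the first and status_driver_data[] the last member of ieee80211_tx_info.status: everything between the two is zeroed, so the rate table filled in at submit time and the ath_frame_info now stored in status_driver_data (see the get_frame_info() change above) both survive the clear:

	/* assumed ieee80211_tx_info.status layout:
	 *   rates[]                <- preserved (front, skipped by the memset)
	 *   ack_signal, ampdu_len,
	 *   tx_time, ...           <- zeroed by ath_clear_tx_status()
	 *   status_driver_data[]   <- preserved (back, holds ath_frame_info)
	 */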
+
 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_status *ts, int nframes, int nbad,
                             int txok)
@@ -2553,6 +2563,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
        struct ath_hw *ah = sc->sc_ah;
        u8 i, tx_rateindex;
 
+       ath_clear_tx_status(tx_info);
+
        if (txok)
                tx_info->status.ack_signal = ts->ts_rssi;
 
@@ -2567,6 +2579,13 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
        tx_info->status.ampdu_len = nframes;
        tx_info->status.ampdu_ack_len = nframes - nbad;
 
+       tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
+
+       for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
+               tx_info->status.rates[i].count = 0;
+               tx_info->status.rates[i].idx = -1;
+       }
+
        if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
            (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
                /*
@@ -2588,16 +2607,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
                        tx_info->status.rates[tx_rateindex].count =
                                hw->max_rate_tries;
        }
-
-       for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
-               tx_info->status.rates[i].count = 0;
-               tx_info->status.rates[i].idx = -1;
-       }
-
-       tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
-
-       /* we report airtime in ath_tx_count_airtime(), don't report twice */
-       tx_info->status.tx_time = 0;
 }
 
 static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
index ba3c159111d3155c0475385bfc43e063a303c286..212fbbe1cd7ec4c02460af013e2e32e8c85578b8 100644 (file)
@@ -557,7 +557,7 @@ enum brcmf_sdio_frmtype {
        BRCMF_SDIO_FT_SUB,
 };
 
-#define SDIOD_DRVSTR_KEY(chip, pmu)     (((chip) << 16) | (pmu))
+#define SDIOD_DRVSTR_KEY(chip, pmu)     (((unsigned int)(chip) << 16) | (pmu))
 
 /* SDIO Pad drive strength to select value mappings */
 struct sdiod_drive_str {
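
The cast matters because of C's signed-shift rules, and the same one-character fix recurs below (mt76's 0xfU << 28 and the DDR PMU's 0xFFU << 24): left-shifting a value into bit 31 of a signed int is undefined behaviour. A minimal illustration with hypothetical values:

	unsigned int ok = 0xFFU << 24;	/* unsigned arithmetic: well defined */
	/* int bad = 0xFF << 24;	   signed overflow: undefined behaviour */

	/* chip ids with bit 15 set are now safe too, e.g. a hypothetical 0xa839 */
	unsigned int key = ((unsigned int)0xa839 << 16) | 0x11;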
@@ -4165,7 +4165,7 @@ static int brcmf_sdio_bus_reset(struct device *dev)
 
        /* reset the adapter */
        sdio_claim_host(sdiodev->func1);
-       mmc_hw_reset(sdiodev->func1->card->host);
+       mmc_hw_reset(sdiodev->func1->card);
        sdio_release_host(sdiodev->func1);
 
        brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
index bde9e4bbfffe79d3a4ab7d2beaa68223d75ad26a..4f3238d2a171a7d6a6c7bd92d782c29d13c53195 100644 (file)
@@ -2639,7 +2639,7 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
 
        /* Run a HW reset of the SDIO interface. */
        sdio_claim_host(func);
-       ret = mmc_hw_reset(func->card->host);
+       ret = mmc_hw_reset(func->card);
        sdio_release_host(func);
 
        switch (ret) {
index 8a22ee5816748c5a9f306508b0d2e57735350e29..df85ebc6e1df07a7c2e48c30efa50cdeec9f993f 100644 (file)
@@ -80,7 +80,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
 
        /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
-       mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
+       mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
 
        /* RG_SSUSB_CDR_BR_PE1D = 0x3 */
        mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
index 72fc41ac83c0d0a0c9da1bba7b94b98ccaa2b0bf..9140b0163474438025236d76302a6e2614e0e817 100644 (file)
@@ -146,7 +146,7 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
         * To guarantee that the SDIO card is power cycled, as required to make
         * the FW programming to succeed, let's do a brute force HW reset.
         */
-       mmc_hw_reset(card->host);
+       mmc_hw_reset(card);
 
        sdio_enable_func(func);
        sdio_release_host(func);
index 677fa4bf76d3ba15459032bffed2d8549629a191..e1846d04817f373d89c4e1cf28095ac9eb6346e8 100644 (file)
@@ -366,7 +366,7 @@ static inline void nvme_end_req(struct request *req)
 {
        blk_status_t status = nvme_error_status(nvme_req(req)->status);
 
-       if (unlikely(nvme_req(req)->status != NVME_SC_SUCCESS))
+       if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
                nvme_log_error(req);
        nvme_end_req_zoned(req);
        nvme_trace_bio_complete(req);
@@ -1015,6 +1015,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                        goto out;
        }
 
+       req->rq_flags |= RQF_QUIET;
        ret = nvme_execute_rq(req, at_head);
        if (result && ret >= 0)
                *result = nvme_req(req)->result;
@@ -1287,6 +1288,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
                                 warn_str, cur->nidl);
                        return -1;
                }
+               if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+                       return NVME_NIDT_EUI64_LEN;
                memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
                return NVME_NIDT_EUI64_LEN;
        case NVME_NIDT_NGUID:
@@ -1295,6 +1298,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
                                 warn_str, cur->nidl);
                        return -1;
                }
+               if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+                       return NVME_NIDT_NGUID_LEN;
                memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
                return NVME_NIDT_NGUID_LEN;
        case NVME_NIDT_UUID:
@@ -1303,6 +1308,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
                                 warn_str, cur->nidl);
                        return -1;
                }
+               if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+                       return NVME_NIDT_UUID_LEN;
                uuid_copy(&ids->uuid, data + sizeof(*cur));
                return NVME_NIDT_UUID_LEN;
        case NVME_NIDT_CSI:
@@ -1399,12 +1406,18 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
        if ((*id)->ncap == 0) /* namespace not allocated or attached */
                goto out_free_id;
 
-       if (ctrl->vs >= NVME_VS(1, 1, 0) &&
-           !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
-               memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
-       if (ctrl->vs >= NVME_VS(1, 2, 0) &&
-           !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
-               memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+
+       if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
+               dev_info(ctrl->device,
+                        "Ignoring bogus Namespace Identifiers\n");
+       } else {
+               if (ctrl->vs >= NVME_VS(1, 1, 0) &&
+                   !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+                       memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
+               if (ctrl->vs >= NVME_VS(1, 2, 0) &&
+                   !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+                       memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+       }
 
        return 0;
 
@@ -1830,9 +1843,6 @@ static void nvme_update_disk_info(struct gendisk *disk,
        nvme_config_discard(disk, ns);
        blk_queue_max_write_zeroes_sectors(disk->queue,
                                           ns->ctrl->max_zeroes_sectors);
-
-       set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
-               test_bit(NVME_NS_FORCE_RO, &ns->flags));
 }
 
 static inline bool nvme_first_scan(struct gendisk *disk)
@@ -1891,6 +1901,8 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
                        goto out_unfreeze;
        }
 
+       set_disk_ro(ns->disk, (id->nsattr & NVME_NS_ATTR_RO) ||
+               test_bit(NVME_NS_FORCE_RO, &ns->flags));
        set_bit(NVME_NS_READY, &ns->flags);
        blk_mq_unfreeze_queue(ns->disk->queue);
 
@@ -1903,6 +1915,9 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
        if (nvme_ns_head_multipath(ns->head)) {
                blk_mq_freeze_queue(ns->head->disk->queue);
                nvme_update_disk_info(ns->head->disk, ns, id);
+               set_disk_ro(ns->head->disk,
+                           (id->nsattr & NVME_NS_ATTR_RO) ||
+                                   test_bit(NVME_NS_FORCE_RO, &ns->flags));
                nvme_mpath_revalidate_paths(ns);
                blk_stack_limits(&ns->head->disk->queue->limits,
                                 &ns->queue->limits, 0);
@@ -3589,15 +3604,20 @@ static const struct attribute_group *nvme_dev_attr_groups[] = {
        NULL,
 };
 
-static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
+static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
                unsigned nsid)
 {
        struct nvme_ns_head *h;
 
-       lockdep_assert_held(&subsys->lock);
+       lockdep_assert_held(&ctrl->subsys->lock);
 
-       list_for_each_entry(h, &subsys->nsheads, entry) {
-               if (h->ns_id != nsid)
+       list_for_each_entry(h, &ctrl->subsys->nsheads, entry) {
+               /*
+                * Private namespaces can share NSIDs under some conditions.
+                * In that case we can't use the same ns_head for namespaces
+                * with the same NSID.
+                */
+               if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
                        continue;
                if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
                        return h;
@@ -3791,7 +3811,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
        }
 
        mutex_lock(&ctrl->subsys->lock);
-       head = nvme_find_ns_head(ctrl->subsys, nsid);
+       head = nvme_find_ns_head(ctrl, nsid);
        if (!head) {
                ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids);
                if (ret) {
@@ -3988,6 +4008,16 @@ static void nvme_ns_remove(struct nvme_ns *ns)
        set_capacity(ns->disk, 0);
        nvme_fault_inject_fini(&ns->fault_inject);
 
+       /*
+        * Ensure that !NVME_NS_READY is seen by other threads to prevent
+        * this ns from going back into current_path.
+        */
+       synchronize_srcu(&ns->head->srcu);
+
+       /* wait for concurrent submissions */
+       if (nvme_mpath_clear_current_path(ns))
+               synchronize_srcu(&ns->head->srcu);
+
        mutex_lock(&ns->ctrl->subsys->lock);
        list_del_rcu(&ns->siblings);
        if (list_empty(&ns->head->list)) {
@@ -3999,10 +4029,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
        /* guarantee not available in head->list */
        synchronize_rcu();
 
-       /* wait for concurrent submissions */
-       if (nvme_mpath_clear_current_path(ns))
-               synchronize_srcu(&ns->head->srcu);
-
        if (!nvme_ns_head_multipath(ns->head))
                nvme_cdev_del(&ns->cdev, &ns->cdev_device);
        del_gendisk(ns->disk);
@@ -4480,6 +4506,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
        if (ctrl->queue_count > 1) {
                nvme_queue_scan(ctrl);
                nvme_start_queues(ctrl);
+               nvme_mpath_update(ctrl);
        }
 
        nvme_change_uevent(ctrl, "NVME_EVENT=connected");
index 1b31f19e10537d2f328f7fa7f7551a5fe6e094a7..d464fdf978fbaa449cd353f8b4972bfd4d62f29c 100644 (file)
@@ -482,10 +482,11 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 
        /*
         * Add a multipath node if the subsystem supports multiple controllers.
-        * We also do this for private namespaces as the namespace sharing data could
-        * change after a rescan.
+        * We also do this for private namespaces as the namespace sharing flag
+        * could change after a rescan.
         */
-       if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath)
+       if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
+           !nvme_is_unique_nsid(ctrl, head) || !multipath)
                return 0;
 
        head->disk = blk_alloc_disk(ctrl->numa_node);
@@ -612,8 +613,17 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
        ns->ana_grpid = le32_to_cpu(desc->grpid);
        ns->ana_state = desc->state;
        clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
-
-       if (nvme_state_is_live(ns->ana_state))
+       /*
+        * nvme_mpath_set_live() will trigger I/O to the multipath path device
+        * and in turn to this path device.  However, we cannot accept this
+        * I/O if the controller is not live: doing so may deadlock when
+        * called from nvme_mpath_init_identify(), and the ctrl would never
+        * complete initialization, leaving I/O stuck.  For this case we
+        * will reprocess the ANA log page in nvme_mpath_update() once the
+        * controller is ready.
+        */
+       if (nvme_state_is_live(ns->ana_state) &&
+           ns->ctrl->state == NVME_CTRL_LIVE)
                nvme_mpath_set_live(ns);
 }
 
@@ -700,6 +710,18 @@ static void nvme_ana_work(struct work_struct *work)
        nvme_read_ana_log(ctrl);
 }
 
+void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+       u32 nr_change_groups = 0;
+
+       if (!ctrl->ana_log_buf)
+               return;
+
+       mutex_lock(&ctrl->ana_lock);
+       nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
+       mutex_unlock(&ctrl->ana_lock);
+}
+
 static void nvme_anatt_timeout(struct timer_list *t)
 {
        struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);
index f4b674a8ce20eb64d73601432b9bc7d2296100f7..a2b53ca6333590dd7e4cb754435761549a3b1105 100644 (file)
@@ -144,6 +144,11 @@ enum nvme_quirks {
         * encoding the generation sequence number.
         */
        NVME_QUIRK_SKIP_CID_GEN                 = (1 << 17),
+
+       /*
+        * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
+        */
+       NVME_QUIRK_BOGUS_NID                    = (1 << 18),
 };
 
 /*
@@ -723,6 +728,25 @@ static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
                return queue_live;
        return __nvme_check_ready(ctrl, rq, queue_live);
 }
+
+/*
+ * An NSID must be unique for all shared namespaces, and also for private
+ * namespaces when at least one of the following conditions is met:
+ *   1. Namespace Management is supported by the controller
+ *   2. ANA is supported by the controller
+ *   3. NVM Sets are supported by the controller
+ *
+ * Otherwise, private namespaces are not required to report a unique NSID.
+ */
+static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
+               struct nvme_ns_head *head)
+{
+       return head->shared ||
+               (ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
+               (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
+               (ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
+}
+
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
@@ -782,6 +806,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
 void nvme_mpath_remove_disk(struct nvme_ns_head *head);
 int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
 void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
+void nvme_mpath_update(struct nvme_ctrl *ctrl);
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
 void nvme_mpath_stop(struct nvme_ctrl *ctrl);
 bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
@@ -853,6 +878,9 @@ static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
 "Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
        return 0;
 }
+static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+}
 static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
 {
 }
index 2e98ac3f3ad684feaa5ece1cd424306e7d91bd08..3aacf1c0d5a5f8cfedb5a03e599f21dd61a0cb0b 100644 (file)
@@ -45,7 +45,7 @@
 #define NVME_MAX_SEGS  127
 
 static int use_threaded_interrupts;
-module_param(use_threaded_interrupts, int, 0);
+module_param(use_threaded_interrupts, int, 0444);
 
 static bool use_cmb_sqes = true;
 module_param(use_cmb_sqes, bool, 0444);
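
The permission change is what makes the parameter visible at runtime: a mode of 0 creates no sysfs entry at all, while 0444 exposes it read-only, matching use_cmb_sqes just below. Assuming the driver loads as the nvme module:

	cat /sys/module/nvme/parameters/use_threaded_interrupts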
@@ -3409,7 +3409,10 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_VDEVICE(INTEL, 0x5845),   /* Qemu emulated controller */
                .driver_data = NVME_QUIRK_IDENTIFY_CNS |
-                               NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+                               NVME_QUIRK_DISABLE_WRITE_ZEROES |
+                               NVME_QUIRK_BOGUS_NID, },
+       { PCI_VDEVICE(REDHAT, 0x0010),  /* Qemu emulated controller */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x126f, 0x2263),   /* Silicon Motion unidentified */
                .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
        { PCI_DEVICE(0x1bb1, 0x0100),   /* Seagate Nytro Flash Storage */
@@ -3447,6 +3450,10 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
        { PCI_DEVICE(0x2646, 0x2263),   /* KINGSTON A2000 NVMe SSD  */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+       { PCI_DEVICE(0x1e4B, 0x1002),   /* MAXIO MAP1002 */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
+       { PCI_DEVICE(0x1e4B, 0x1202),   /* MAXIO MAP1202 */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
@@ -3467,7 +3474,10 @@ static const struct pci_device_id nvme_id_table[] = {
                                NVME_QUIRK_128_BYTES_SQES |
                                NVME_QUIRK_SHARED_TAGS |
                                NVME_QUIRK_SKIP_CID_GEN },
-
+       { PCI_DEVICE(0x144d, 0xa808),   /* Samsung X5 */
+               .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+                               NVME_QUIRK_NO_DEEPEST_PS |
+                               NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { 0, }
 };
index 46d0dab686dd6b3711e54b577793c7e90a5851c1..397daaf51f1baf878b1e22e3ff881582c40b8aa8 100644 (file)
@@ -988,7 +988,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
        ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
        mutex_unlock(&ctrl->lock);
 
-       schedule_work(&ctrl->async_event_work);
+       queue_work(nvmet_wq, &ctrl->async_event_work);
 }
 
 void nvmet_execute_keep_alive(struct nvmet_req *req)
index 8fedd1e052fec42cadf7ef5cf26498d0240d11db..e44b2988759ec2b1ed343f410de1cce40f2f48b4 100644 (file)
@@ -1555,7 +1555,7 @@ static void nvmet_port_release(struct config_item *item)
        struct nvmet_port *port = to_nvmet_port(item);
 
        /* Let inflight controllers teardown complete */
-       flush_scheduled_work();
+       flush_workqueue(nvmet_wq);
        list_del(&port->global_entry);
 
        kfree(port->ana_state);
index 64c2d2f3e25c04dcc1edd9899fce1904974c3305..90e75324dae0562146022151335d4810f6911ccc 100644 (file)
@@ -20,6 +20,9 @@ struct workqueue_struct *zbd_wq;
 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
 static DEFINE_IDA(cntlid_ida);
 
+struct workqueue_struct *nvmet_wq;
+EXPORT_SYMBOL_GPL(nvmet_wq);
+
 /*
  * This read/write semaphore is used to synchronize access to configuration
  * information on a target system that will result in discovery log page
@@ -205,7 +208,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
        list_add_tail(&aen->entry, &ctrl->async_events);
        mutex_unlock(&ctrl->lock);
 
-       schedule_work(&ctrl->async_event_work);
+       queue_work(nvmet_wq, &ctrl->async_event_work);
 }
 
 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
@@ -385,7 +388,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
        if (reset_tbkas) {
                pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
                        ctrl->cntlid);
-               schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+               queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
                return;
        }
 
@@ -403,7 +406,7 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
        pr_debug("ctrl %d start keep-alive timer for %d secs\n",
                ctrl->cntlid, ctrl->kato);
 
-       schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+       queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -1120,7 +1123,7 @@ static inline u8 nvmet_cc_iocqes(u32 cc)
 
 static inline bool nvmet_css_supported(u8 cc_css)
 {
-       switch (cc_css <<= NVME_CC_CSS_SHIFT) {
+       switch (cc_css << NVME_CC_CSS_SHIFT) {
        case NVME_CC_CSS_NVM:
        case NVME_CC_CSS_CSI:
                return true;
@@ -1478,7 +1481,7 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
        mutex_lock(&ctrl->lock);
        if (!(ctrl->csts & NVME_CSTS_CFS)) {
                ctrl->csts |= NVME_CSTS_CFS;
-               schedule_work(&ctrl->fatal_err_work);
+               queue_work(nvmet_wq, &ctrl->fatal_err_work);
        }
        mutex_unlock(&ctrl->lock);
 }
@@ -1619,9 +1622,15 @@ static int __init nvmet_init(void)
                goto out_free_zbd_work_queue;
        }
 
+       nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+       if (!nvmet_wq) {
+               error = -ENOMEM;
+               goto out_free_buffered_work_queue;
+       }
+
        error = nvmet_init_discovery();
        if (error)
-               goto out_free_work_queue;
+               goto out_free_nvmet_work_queue;
 
        error = nvmet_init_configfs();
        if (error)
@@ -1630,7 +1639,9 @@ static int __init nvmet_init(void)
 
 out_exit_discovery:
        nvmet_exit_discovery();
-out_free_work_queue:
+out_free_nvmet_work_queue:
+       destroy_workqueue(nvmet_wq);
+out_free_buffered_work_queue:
        destroy_workqueue(buffered_io_wq);
 out_free_zbd_work_queue:
        destroy_workqueue(zbd_wq);
@@ -1642,6 +1653,7 @@ static void __exit nvmet_exit(void)
        nvmet_exit_configfs();
        nvmet_exit_discovery();
        ida_destroy(&cntlid_ida);
+       destroy_workqueue(nvmet_wq);
        destroy_workqueue(buffered_io_wq);
        destroy_workqueue(zbd_wq);
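
The rest of the nvmet changes below all follow one pattern: work items move from the system workqueue to the new nvmet_wq, so teardown paths can flush nvmet's own work instead of calling flush_scheduled_work(), which flushes the global queue and can stall on unrelated work; WQ_MEM_RECLAIM keeps the queue making progress when items are queued from reclaim context. The recurring before/after shape, sketched:

	schedule_work(&w);			/* before: system workqueue */
	queue_work(nvmet_wq, &w);		/* after */

	schedule_delayed_work(&dw, t);		/* before */
	queue_delayed_work(nvmet_wq, &dw, t);	/* after */

	flush_scheduled_work();			/* before: flushes everything */
	flush_workqueue(nvmet_wq);		/* after: flushes only nvmet work */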
 
index de90001fc5c4c0de887f8997234c67ebd9ade511..ab2627e17bb97f2b85af5e9e2c191f714079d159 100644 (file)
@@ -1491,7 +1491,7 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
        list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
                if (!nvmet_fc_tgt_a_get(assoc))
                        continue;
-               if (!schedule_work(&assoc->del_work))
+               if (!queue_work(nvmet_wq, &assoc->del_work))
                        /* already deleting - release local reference */
                        nvmet_fc_tgt_a_put(assoc);
        }
@@ -1546,7 +1546,7 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
                        continue;
                assoc->hostport->invalid = 1;
                noassoc = false;
-               if (!schedule_work(&assoc->del_work))
+               if (!queue_work(nvmet_wq, &assoc->del_work))
                        /* already deleting - release local reference */
                        nvmet_fc_tgt_a_put(assoc);
        }
@@ -1592,7 +1592,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
                nvmet_fc_tgtport_put(tgtport);
 
                if (found_ctrl) {
-                       if (!schedule_work(&assoc->del_work))
+                       if (!queue_work(nvmet_wq, &assoc->del_work))
                                /* already deleting - release local reference */
                                nvmet_fc_tgt_a_put(assoc);
                        return;
@@ -2060,7 +2060,7 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
        iod->rqstdatalen = lsreqbuf_len;
        iod->hosthandle = hosthandle;
 
-       schedule_work(&iod->work);
+       queue_work(nvmet_wq, &iod->work);
 
        return 0;
 }
index 54606f1872b4aba8aedbfbfa5ea5a9878e404b9d..5c16372f3b533cc646be2225517dd8d391547b2f 100644 (file)
@@ -360,7 +360,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
                spin_lock(&rport->lock);
                list_add_tail(&rport->ls_list, &tls_req->ls_list);
                spin_unlock(&rport->lock);
-               schedule_work(&rport->ls_work);
+               queue_work(nvmet_wq, &rport->ls_work);
                return ret;
        }
 
@@ -393,7 +393,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
                spin_lock(&rport->lock);
                list_add_tail(&rport->ls_list, &tls_req->ls_list);
                spin_unlock(&rport->lock);
-               schedule_work(&rport->ls_work);
+               queue_work(nvmet_wq, &rport->ls_work);
        }
 
        return 0;
@@ -448,7 +448,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
                spin_lock(&tport->lock);
                list_add_tail(&tport->ls_list, &tls_req->ls_list);
                spin_unlock(&tport->lock);
-               schedule_work(&tport->ls_work);
+               queue_work(nvmet_wq, &tport->ls_work);
                return ret;
        }
 
@@ -480,7 +480,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
                spin_lock(&tport->lock);
                list_add_tail(&tport->ls_list, &tls_req->ls_list);
                spin_unlock(&tport->lock);
-               schedule_work(&tport->ls_work);
+               queue_work(nvmet_wq, &tport->ls_work);
        }
 
        return 0;
@@ -520,7 +520,7 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
        tgt_rscn->tport = tgtport->private;
        INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
 
-       schedule_work(&tgt_rscn->work);
+       queue_work(nvmet_wq, &tgt_rscn->work);
 }
 
 static void
@@ -739,7 +739,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
        INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
        kref_init(&tfcp_req->ref);
 
-       schedule_work(&tfcp_req->fcp_rcv_work);
+       queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
 
        return 0;
 }
@@ -921,7 +921,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
 {
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
 
-       schedule_work(&tfcp_req->tio_done_work);
+       queue_work(nvmet_wq, &tfcp_req->tio_done_work);
 }
 
 static void
@@ -976,7 +976,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 
        if (abortio)
                /* leave the reference while the work item is scheduled */
-               WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+               WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
        else  {
                /*
                 * as the io has already had the done callback made,
index 6485dc8eb974031c28986f2eccba11fd652a4963..f3d58abf11e047b15e7d017f30edf569cfab5e5a 100644 (file)
@@ -283,7 +283,7 @@ static void nvmet_file_execute_flush(struct nvmet_req *req)
        if (!nvmet_check_transfer_len(req, 0))
                return;
        INIT_WORK(&req->f.work, nvmet_file_flush_work);
-       schedule_work(&req->f.work);
+       queue_work(nvmet_wq, &req->f.work);
 }
 
 static void nvmet_file_execute_discard(struct nvmet_req *req)
@@ -343,7 +343,7 @@ static void nvmet_file_execute_dsm(struct nvmet_req *req)
        if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
                return;
        INIT_WORK(&req->f.work, nvmet_file_dsm_work);
-       schedule_work(&req->f.work);
+       queue_work(nvmet_wq, &req->f.work);
 }
 
 static void nvmet_file_write_zeroes_work(struct work_struct *w)
@@ -373,7 +373,7 @@ static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
        if (!nvmet_check_transfer_len(req, 0))
                return;
        INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
-       schedule_work(&req->f.work);
+       queue_work(nvmet_wq, &req->f.work);
 }
 
 u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
index 23f9d6f888042f8f6525c8b1cc696c57ab3d0171..59024af2da2e3d22edb5148f4d31dcbcc87768ff 100644 (file)
@@ -166,7 +166,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                iod->req.transfer_len = blk_rq_payload_bytes(req);
        }
 
-       schedule_work(&iod->work);
+       queue_work(nvmet_wq, &iod->work);
        return BLK_STS_OK;
 }
 
@@ -187,7 +187,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
                return;
        }
 
-       schedule_work(&iod->work);
+       queue_work(nvmet_wq, &iod->work);
 }
 
 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
index d910c6aad4b664166c436344c1033e8ee17015c2..69818752a33a58d3a6dd7b9c6786b320a187ffd0 100644 (file)
@@ -366,6 +366,7 @@ struct nvmet_req {
 
 extern struct workqueue_struct *buffered_io_wq;
 extern struct workqueue_struct *zbd_wq;
+extern struct workqueue_struct *nvmet_wq;
 
 static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
 {
index a4de1e0d518b5b3a94d21605e4a9a6716fc7a14a..5247c24538ebae5d59285cc3e458a528007f6025 100644 (file)
@@ -283,7 +283,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
        if (req->p.use_workqueue || effects) {
                INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
                req->p.rq = rq;
-               schedule_work(&req->p.work);
+               queue_work(nvmet_wq, &req->p.work);
        } else {
                rq->end_io_data = req;
                blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
index 2446d0918a41a730acc34ff59c7979a052644189..2fab0b219b255d81d3e10a461a4c067498dd8b63 100644 (file)
@@ -1584,7 +1584,7 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 
        if (queue->host_qid == 0) {
                /* Let inflight controller teardown complete */
-               flush_scheduled_work();
+               flush_workqueue(nvmet_wq);
        }
 
        ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
@@ -1669,7 +1669,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 
        if (disconnect) {
                rdma_disconnect(queue->cm_id);
-               schedule_work(&queue->release_work);
+               queue_work(nvmet_wq, &queue->release_work);
        }
 }
 
@@ -1699,7 +1699,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
        mutex_unlock(&nvmet_rdma_queue_mutex);
 
        pr_err("failed to connect queue %d\n", queue->idx);
-       schedule_work(&queue->release_work);
+       queue_work(nvmet_wq, &queue->release_work);
 }
 
 /**
@@ -1773,7 +1773,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
                if (!queue) {
                        struct nvmet_rdma_port *port = cm_id->context;
 
-                       schedule_delayed_work(&port->repair_work, 0);
+                       queue_delayed_work(nvmet_wq, &port->repair_work, 0);
                        break;
                }
                fallthrough;
@@ -1903,7 +1903,7 @@ static void nvmet_rdma_repair_port_work(struct work_struct *w)
        nvmet_rdma_disable_port(port);
        ret = nvmet_rdma_enable_port(port);
        if (ret)
-               schedule_delayed_work(&port->repair_work, 5 * HZ);
+               queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
 }
 
 static int nvmet_rdma_add_port(struct nvmet_port *nport)
@@ -2053,7 +2053,7 @@ static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data
        }
        mutex_unlock(&nvmet_rdma_queue_mutex);
 
-       flush_scheduled_work();
+       flush_workqueue(nvmet_wq);
 }
 
 static struct ib_client nvmet_rdma_ib_client = {
index 83ca577f72be2407813113f3af6a45680fb08ac3..2793554e622ee63fb8eb8e4af6688fa2c1a4dd9e 100644 (file)
@@ -1269,7 +1269,7 @@ static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
        spin_lock(&queue->state_lock);
        if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
                queue->state = NVMET_TCP_Q_DISCONNECTING;
-               schedule_work(&queue->release_work);
+               queue_work(nvmet_wq, &queue->release_work);
        }
        spin_unlock(&queue->state_lock);
 }
@@ -1684,7 +1684,7 @@ static void nvmet_tcp_listen_data_ready(struct sock *sk)
                goto out;
 
        if (sk->sk_state == TCP_LISTEN)
-               schedule_work(&port->accept_work);
+               queue_work(nvmet_wq, &port->accept_work);
 out:
        read_unlock_bh(&sk->sk_callback_lock);
 }
@@ -1815,7 +1815,7 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
 
        if (sq->qid == 0) {
                /* Let inflight controller teardown complete */
-               flush_scheduled_work();
+               flush_workqueue(nvmet_wq);
        }
 
        queue->nr_cmds = sq->size * 2;
@@ -1876,12 +1876,12 @@ static void __exit nvmet_tcp_exit(void)
 
        nvmet_unregister_transport(&nvmet_tcp_ops);
 
-       flush_scheduled_work();
+       flush_workqueue(nvmet_wq);
        mutex_lock(&nvmet_tcp_queue_mutex);
        list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
                kernel_sock_shutdown(queue->sock, SHUT_RDWR);
        mutex_unlock(&nvmet_tcp_queue_mutex);
-       flush_scheduled_work();
+       flush_workqueue(nvmet_wq);
 
        destroy_workqueue(nvmet_tcp_wq);
 }
index df84d221e3de9e4409b0078e2032896a11b2433f..d270a204324e9cc556de9715b6d9470b28694583 100644 (file)
@@ -766,14 +766,6 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
        return irqd->parent_data->hwirq;
 }
 
-static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
-                                      struct msi_desc *msi_desc)
-{
-       msi_entry->address = ((u64)msi_desc->msg.address_hi << 32) |
-                             msi_desc->msg.address_lo;
-       msi_entry->data = msi_desc->msg.data;
-}
-
 /*
  * @nr_bm_irqs:                Indicates the number of IRQs that were allocated from
  *                     the bitmap.
@@ -3415,6 +3407,15 @@ static int hv_pci_probe(struct hv_device *hdev,
        hbus->bridge->domain_nr = dom;
 #ifdef CONFIG_X86
        hbus->sysdata.domain = dom;
+#elif defined(CONFIG_ARM64)
+       /*
+        * Set the PCI bus parent to be the corresponding VMbus
+        * device. Then the VMbus device will be assigned as the
+        * ACPI companion in pcibios_root_bridge_prepare() and
+        * pci_dma_configure() will propagate device coherence
+        * information to devices created on the bus.
+        */
+       hbus->sysdata.parent = hdev->device.parent;
 #endif
 
        hbus->hdev = hdev;
index afdcb91601d2bbf93b5221673a67f503a05f679f..1e2d69453771d4aca267b0a64d4d0419c0dc820f 100644 (file)
@@ -187,7 +187,7 @@ source "drivers/perf/hisilicon/Kconfig"
 
 config MARVELL_CN10K_DDR_PMU
        tristate "Enable MARVELL CN10K DRAM Subsystem(DSS) PMU Support"
-       depends on ARM64 || (COMPILE_TEST && 64BIT)
+       depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT)
        help
          Enable perf support for Marvell DDR Performance monitoring
          event on CN10K platform.
index 94ebc1ecace7cf342398e912d748c7fab33c3633..b1b2a55de77fc8c658f8e378ce21df435f1fa4be 100644 (file)
@@ -29,7 +29,7 @@
 #define CNTL_OVER_MASK         0xFFFFFFFE
 
 #define CNTL_CSV_SHIFT         24
-#define CNTL_CSV_MASK          (0xFF << CNTL_CSV_SHIFT)
+#define CNTL_CSV_MASK          (0xFFU << CNTL_CSV_SHIFT)
 
 #define EVENT_CYCLES_ID                0
 #define EVENT_CYCLES_COUNTER   0
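
The one-character fix above is about integer promotion: 0xFF has type int, so with CNTL_CSV_SHIFT equal to 24 the expression 0xFF << 24 shifts a set bit into the sign bit of a signed int, which is undefined behavior in C. Suffixing the constant with U keeps the whole mask computation in unsigned arithmetic. Illustration:

#define CSV_SHIFT	24

/* signed: 0xFF << 24 overflows int, which is undefined behavior;
 * unsigned: well defined, yields 0xFF000000 */
static const unsigned int csv_mask = 0xFFU << CSV_SHIFT;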
index 7640491aab123c6e1dc71c1bca49d68bc31e268d..30234c261b05c33b3616a4c98114dee478f64bbc 100644 (file)
@@ -736,7 +736,7 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
 {
        u64 mpidr;
        int cpu_cluster_id;
-       struct cluster_pmu *cluster = NULL;
+       struct cluster_pmu *cluster;
 
        /*
         * This assumes that the cluster_id is in MPIDR[aff1] for
@@ -758,10 +758,10 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
                         cluster->cluster_id);
                cpumask_set_cpu(cpu, &cluster->cluster_cpus);
                *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
-               break;
+               return cluster;
        }
 
-       return cluster;
+       return NULL;
 }
 
 static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
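
The l2-cache PMU hunk is a readability refactor with no functional change: instead of initializing cluster to NULL, breaking out of the loop on a match and returning a shared variable, the function returns the match directly from the loop body and returns NULL only when the loop runs off the end. The shape of it, as a sketch with made-up types:

#include <linux/list.h>

struct item {
	struct list_head node;
	int key;
};

static struct item *find_item(struct list_head *head, int key)
{
	struct item *it;

	list_for_each_entry(it, head, node)
		if (it->key == key)
			return it;	/* match: return straight away */

	return NULL;			/* loop exhausted: not found */
}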
index 5d4be9735d9d0d59c08a56b7d0d18873d19ade3b..6420ca129548e77baa48b8fc8c76a9756149e0b4 100644 (file)
@@ -2,6 +2,7 @@
 
 # tell define_trace.h where to find the cros ec trace header
 CFLAGS_cros_ec_trace.o:=               -I$(src)
+CFLAGS_cros_ec_sensorhub_ring.o:=      -I$(src)
 
 obj-$(CONFIG_CHROMEOS_LAPTOP)          += chromeos_laptop.o
 obj-$(CONFIG_CHROMEOS_PRIVACY_SCREEN)  += chromeos_privacy_screen.o
@@ -21,7 +22,7 @@ obj-$(CONFIG_CROS_EC_CHARDEV)         += cros_ec_chardev.o
 obj-$(CONFIG_CROS_EC_LIGHTBAR)         += cros_ec_lightbar.o
 obj-$(CONFIG_CROS_EC_VBC)              += cros_ec_vbc.o
 obj-$(CONFIG_CROS_EC_DEBUGFS)          += cros_ec_debugfs.o
-cros-ec-sensorhub-objs                 := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o cros_ec_trace.o
+cros-ec-sensorhub-objs                 := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o
 obj-$(CONFIG_CROS_EC_SENSORHUB)                += cros-ec-sensorhub.o
 obj-$(CONFIG_CROS_EC_SYSFS)            += cros_ec_sysfs.o
 obj-$(CONFIG_CROS_USBPD_LOGGER)                += cros_usbpd_logger.o
index 272c89837d745a9e48a8df80ef6313feda048f7d..0dbceee87a4b1a44d79e7cc9cd186972d9a19723 100644 (file)
@@ -25,6 +25,9 @@
 
 #define CIRC_ADD(idx, size, value)     (((idx) + (value)) & ((size) - 1))
 
+/* waitqueue for log readers */
+static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq);
+
 /**
  * struct cros_ec_debugfs - EC debugging information.
  *
@@ -33,7 +36,6 @@
  * @log_buffer: circular buffer for console log information
  * @read_msg: preallocated EC command and buffer to read console log
  * @log_mutex: mutex to protect circular buffer
- * @log_wq: waitqueue for log readers
  * @log_poll_work: recurring task to poll EC for new console log data
  * @panicinfo_blob: panicinfo debugfs blob
  */
@@ -44,7 +46,6 @@ struct cros_ec_debugfs {
        struct circ_buf log_buffer;
        struct cros_ec_command *read_msg;
        struct mutex log_mutex;
-       wait_queue_head_t log_wq;
        struct delayed_work log_poll_work;
        /* EC panicinfo */
        struct debugfs_blob_wrapper panicinfo_blob;
@@ -107,7 +108,7 @@ static void cros_ec_console_log_work(struct work_struct *__work)
                        buf_space--;
                }
 
-               wake_up(&debug_info->log_wq);
+               wake_up(&cros_ec_debugfs_log_wq);
        }
 
        mutex_unlock(&debug_info->log_mutex);
@@ -141,7 +142,7 @@ static ssize_t cros_ec_console_log_read(struct file *file, char __user *buf,
 
                mutex_unlock(&debug_info->log_mutex);
 
-               ret = wait_event_interruptible(debug_info->log_wq,
+               ret = wait_event_interruptible(cros_ec_debugfs_log_wq,
                                        CIRC_CNT(cb->head, cb->tail, LOG_SIZE));
                if (ret < 0)
                        return ret;
@@ -173,7 +174,7 @@ static __poll_t cros_ec_console_log_poll(struct file *file,
        struct cros_ec_debugfs *debug_info = file->private_data;
        __poll_t mask = 0;
 
-       poll_wait(file, &debug_info->log_wq, wait);
+       poll_wait(file, &cros_ec_debugfs_log_wq, wait);
 
        mutex_lock(&debug_info->log_mutex);
        if (CIRC_CNT(debug_info->log_buffer.head,
@@ -377,7 +378,6 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
        debug_info->log_buffer.tail = 0;
 
        mutex_init(&debug_info->log_mutex);
-       init_waitqueue_head(&debug_info->log_wq);
 
        debugfs_create_file("console_log", S_IFREG | 0444, debug_info->dir,
                            debug_info, &cros_ec_console_log_fops);
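
The debugfs hunks move the log readers' waitqueue out of the per-device cros_ec_debugfs structure into a single file-scope DECLARE_WAIT_QUEUE_HEAD. The usual motivation for this pattern, and presumably here, is lifetime: a reader sleeping in read() or poll() must not be left waiting on a waitqueue embedded in memory that device teardown can free, while a static waitqueue lives as long as the module. A sketch with hypothetical names:

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_log_wq);	/* module lifetime */

static int example_wait_for_data(bool (*data_ready)(void))
{
	/* sleeps on the static queue, not on per-device memory */
	return wait_event_interruptible(example_log_wq, data_ready());
}

static void example_new_data(void)
{
	wake_up(&example_log_wq);	/* wake all sleeping readers */
}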
index 98e37080f760913ecdbeb53f611ce71cfbecbc7f..71948dade0e2aedd0a1f1b4ea09dc954c6fb5b5e 100644 (file)
@@ -17,7 +17,8 @@
 #include <linux/sort.h>
 #include <linux/slab.h>
 
-#include "cros_ec_trace.h"
+#define CREATE_TRACE_POINTS
+#include "cros_ec_sensorhub_trace.h"
 
 /* Precision of fixed point for the m values from the filter */
 #define M_PRECISION BIT(23)
diff --git a/drivers/platform/chrome/cros_ec_sensorhub_trace.h b/drivers/platform/chrome/cros_ec_sensorhub_trace.h
new file mode 100644 (file)
index 0000000..57d9b47
--- /dev/null
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Trace events for the ChromeOS Sensorhub kernel module
+ *
+ * Copyright 2021 Google LLC.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cros_ec
+
+#if !defined(_CROS_EC_SENSORHUB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _CROS_EC_SENSORHUB_TRACE_H_
+
+#include <linux/types.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(cros_ec_sensorhub_timestamp,
+           TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp,
+                    s64 current_timestamp, s64 current_time),
+       TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp,
+               current_time),
+       TP_STRUCT__entry(
+               __field(u32, ec_sample_timestamp)
+               __field(u32, ec_fifo_timestamp)
+               __field(s64, fifo_timestamp)
+               __field(s64, current_timestamp)
+               __field(s64, current_time)
+               __field(s64, delta)
+       ),
+       TP_fast_assign(
+               __entry->ec_sample_timestamp = ec_sample_timestamp;
+               __entry->ec_fifo_timestamp = ec_fifo_timestamp;
+               __entry->fifo_timestamp = fifo_timestamp;
+               __entry->current_timestamp = current_timestamp;
+               __entry->current_time = current_time;
+               __entry->delta = current_timestamp - current_time;
+       ),
+       TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
+                 __entry->ec_sample_timestamp,
+               __entry->ec_fifo_timestamp,
+               __entry->fifo_timestamp,
+               __entry->current_timestamp,
+               __entry->current_time,
+               __entry->delta
+       )
+);
+
+TRACE_EVENT(cros_ec_sensorhub_data,
+           TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp,
+                    s64 current_timestamp, s64 current_time),
+       TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time),
+       TP_STRUCT__entry(
+               __field(u32, ec_sensor_num)
+               __field(u32, ec_fifo_timestamp)
+               __field(s64, fifo_timestamp)
+               __field(s64, current_timestamp)
+               __field(s64, current_time)
+               __field(s64, delta)
+       ),
+       TP_fast_assign(
+               __entry->ec_sensor_num = ec_sensor_num;
+               __entry->ec_fifo_timestamp = ec_fifo_timestamp;
+               __entry->fifo_timestamp = fifo_timestamp;
+               __entry->current_timestamp = current_timestamp;
+               __entry->current_time = current_time;
+               __entry->delta = current_timestamp - current_time;
+       ),
+       TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
+                 __entry->ec_sensor_num,
+               __entry->ec_fifo_timestamp,
+               __entry->fifo_timestamp,
+               __entry->current_timestamp,
+               __entry->current_time,
+               __entry->delta
+       )
+);
+
+TRACE_EVENT(cros_ec_sensorhub_filter,
+           TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy),
+       TP_ARGS(state, dx, dy),
+       TP_STRUCT__entry(
+               __field(s64, dx)
+               __field(s64, dy)
+               __field(s64, median_m)
+               __field(s64, median_error)
+               __field(s64, history_len)
+               __field(s64, x)
+               __field(s64, y)
+       ),
+       TP_fast_assign(
+               __entry->dx = dx;
+               __entry->dy = dy;
+               __entry->median_m = state->median_m;
+               __entry->median_error = state->median_error;
+               __entry->history_len = state->history_len;
+               __entry->x = state->x_offset;
+               __entry->y = state->y_offset;
+       ),
+       TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld",
+                 __entry->dx,
+               __entry->dy,
+               __entry->median_m,
+               __entry->median_error,
+               __entry->history_len,
+               __entry->x,
+               __entry->y
+       )
+);
+
+
+#endif /* _CROS_EC_SENSORHUB_TRACE_H_ */
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cros_ec_sensorhub_trace
+
+#include <trace/define_trace.h>
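
The new header is a standard define_trace.h consumer: TRACE_SYSTEM names the event group, the TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE block at the bottom tells the trace machinery where to re-read this file, and the Makefile hunk earlier adds -I$(src) so that path resolves. Exactly one .c file may define CREATE_TRACE_POINTS before including the header (here cros_ec_sensorhub_ring.c, per the hunk above); that inclusion emits the event definitions, and every other includer gets declarations only. In sketch form:

/* in exactly one compilation unit (emits the definitions): */
#define CREATE_TRACE_POINTS
#include "cros_ec_sensorhub_trace.h"

/* in any other user (declarations only): */
#include "cros_ec_sensorhub_trace.h"

/* call sites then use the generated trace_<event>() helper: */
trace_cros_ec_sensorhub_filter(state, dx, dy);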
index 7e7cfc98657a4aea599e93efb75094f6df4c4144..9bb5cd2c98b8b4690f4633883103c21a801ba04d 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/types.h>
 #include <linux/platform_data/cros_ec_commands.h>
 #include <linux/platform_data/cros_ec_proto.h>
-#include <linux/platform_data/cros_ec_sensorhub.h>
 
 #include <linux/tracepoint.h>
 
@@ -71,100 +70,6 @@ TRACE_EVENT(cros_ec_request_done,
                  __entry->retval)
 );
 
-TRACE_EVENT(cros_ec_sensorhub_timestamp,
-           TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp,
-                    s64 current_timestamp, s64 current_time),
-       TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp,
-               current_time),
-       TP_STRUCT__entry(
-               __field(u32, ec_sample_timestamp)
-               __field(u32, ec_fifo_timestamp)
-               __field(s64, fifo_timestamp)
-               __field(s64, current_timestamp)
-               __field(s64, current_time)
-               __field(s64, delta)
-       ),
-       TP_fast_assign(
-               __entry->ec_sample_timestamp = ec_sample_timestamp;
-               __entry->ec_fifo_timestamp = ec_fifo_timestamp;
-               __entry->fifo_timestamp = fifo_timestamp;
-               __entry->current_timestamp = current_timestamp;
-               __entry->current_time = current_time;
-               __entry->delta = current_timestamp - current_time;
-       ),
-       TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
-                 __entry->ec_sample_timestamp,
-               __entry->ec_fifo_timestamp,
-               __entry->fifo_timestamp,
-               __entry->current_timestamp,
-               __entry->current_time,
-               __entry->delta
-       )
-);
-
-TRACE_EVENT(cros_ec_sensorhub_data,
-           TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp,
-                    s64 current_timestamp, s64 current_time),
-       TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time),
-       TP_STRUCT__entry(
-               __field(u32, ec_sensor_num)
-               __field(u32, ec_fifo_timestamp)
-               __field(s64, fifo_timestamp)
-               __field(s64, current_timestamp)
-               __field(s64, current_time)
-               __field(s64, delta)
-       ),
-       TP_fast_assign(
-               __entry->ec_sensor_num = ec_sensor_num;
-               __entry->ec_fifo_timestamp = ec_fifo_timestamp;
-               __entry->fifo_timestamp = fifo_timestamp;
-               __entry->current_timestamp = current_timestamp;
-               __entry->current_time = current_time;
-               __entry->delta = current_timestamp - current_time;
-       ),
-       TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
-                 __entry->ec_sensor_num,
-               __entry->ec_fifo_timestamp,
-               __entry->fifo_timestamp,
-               __entry->current_timestamp,
-               __entry->current_time,
-               __entry->delta
-       )
-);
-
-TRACE_EVENT(cros_ec_sensorhub_filter,
-           TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy),
-       TP_ARGS(state, dx, dy),
-       TP_STRUCT__entry(
-               __field(s64, dx)
-               __field(s64, dy)
-               __field(s64, median_m)
-               __field(s64, median_error)
-               __field(s64, history_len)
-               __field(s64, x)
-               __field(s64, y)
-       ),
-       TP_fast_assign(
-               __entry->dx = dx;
-               __entry->dy = dy;
-               __entry->median_m = state->median_m;
-               __entry->median_error = state->median_error;
-               __entry->history_len = state->history_len;
-               __entry->x = state->x_offset;
-               __entry->y = state->y_offset;
-       ),
-       TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld",
-                 __entry->dx,
-               __entry->dy,
-               __entry->median_m,
-               __entry->median_error,
-               __entry->history_len,
-               __entry->x,
-               __entry->y
-       )
-);
-
-
 #endif /* _CROS_EC_TRACE_H_ */
 
 /* this part must be outside header guard */
index 5de0bfb0bc4d992b544f4f1eb66134cb0f5e2e59..4bd2752c0823869ac7794ada3661eca23963d0b8 100644 (file)
@@ -115,17 +115,18 @@ static int cros_typec_parse_port_props(struct typec_capability *cap,
                return ret;
        cap->data = ret;
 
+       /* Try-power-role is optional. */
        ret = fwnode_property_read_string(fwnode, "try-power-role", &buf);
        if (ret) {
-               dev_err(dev, "try-power-role not found: %d\n", ret);
-               return ret;
+               dev_warn(dev, "try-power-role not found: %d\n", ret);
+               cap->prefer_role = TYPEC_NO_PREFERRED_ROLE;
+       } else {
+               ret = typec_find_power_role(buf);
+               if (ret < 0)
+                       return ret;
+               cap->prefer_role = ret;
        }
 
-       ret = typec_find_power_role(buf);
-       if (ret < 0)
-               return ret;
-       cap->prefer_role = ret;
-
        cap->fwnode = fwnode;
 
        return 0;
@@ -227,6 +228,7 @@ static void cros_typec_remove_partner(struct cros_typec_data *typec,
        cros_typec_unregister_altmodes(typec, port_num, true);
 
        cros_typec_usb_disconnect_state(port);
+       port->mux_flags = USB_PD_MUX_NONE;
 
        typec_unregister_partner(port->partner);
        port->partner = NULL;
@@ -512,20 +514,38 @@ static int cros_typec_enable_usb4(struct cros_typec_data *typec,
 }
 
 static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
-                               uint8_t mux_flags,
                                struct ec_response_usb_pd_control_v2 *pd_ctrl)
 {
        struct cros_typec_port *port = typec->ports[port_num];
+       struct ec_response_usb_pd_mux_info resp;
+       struct ec_params_usb_pd_mux_info req = {
+               .port = port_num,
+       };
        struct ec_params_usb_pd_mux_ack mux_ack;
        enum typec_orientation orientation;
        int ret;
 
-       if (mux_flags == USB_PD_MUX_NONE) {
+       ret = cros_ec_command(typec->ec, 0, EC_CMD_USB_PD_MUX_INFO,
+                             &req, sizeof(req), &resp, sizeof(resp));
+       if (ret < 0) {
+               dev_warn(typec->dev, "Failed to get mux info for port: %d, err = %d\n",
+                        port_num, ret);
+               return ret;
+       }
+
+       /* No change needs to be made, let's exit early. */
+       if (port->mux_flags == resp.flags && port->role == pd_ctrl->role)
+               return 0;
+
+       port->mux_flags = resp.flags;
+       port->role = pd_ctrl->role;
+
+       if (port->mux_flags == USB_PD_MUX_NONE) {
                ret = cros_typec_usb_disconnect_state(port);
                goto mux_ack;
        }
 
-       if (mux_flags & USB_PD_MUX_POLARITY_INVERTED)
+       if (port->mux_flags & USB_PD_MUX_POLARITY_INVERTED)
                orientation = TYPEC_ORIENTATION_REVERSE;
        else
                orientation = TYPEC_ORIENTATION_NORMAL;
@@ -540,22 +560,22 @@ static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
        if (ret)
                return ret;
 
-       if (mux_flags & USB_PD_MUX_USB4_ENABLED) {
+       if (port->mux_flags & USB_PD_MUX_USB4_ENABLED) {
                ret = cros_typec_enable_usb4(typec, port_num, pd_ctrl);
-       } else if (mux_flags & USB_PD_MUX_TBT_COMPAT_ENABLED) {
+       } else if (port->mux_flags & USB_PD_MUX_TBT_COMPAT_ENABLED) {
                ret = cros_typec_enable_tbt(typec, port_num, pd_ctrl);
-       } else if (mux_flags & USB_PD_MUX_DP_ENABLED) {
+       } else if (port->mux_flags & USB_PD_MUX_DP_ENABLED) {
                ret = cros_typec_enable_dp(typec, port_num, pd_ctrl);
-       } else if (mux_flags & USB_PD_MUX_SAFE_MODE) {
+       } else if (port->mux_flags & USB_PD_MUX_SAFE_MODE) {
                ret = cros_typec_usb_safe_state(port);
-       } else if (mux_flags & USB_PD_MUX_USB_ENABLED) {
+       } else if (port->mux_flags & USB_PD_MUX_USB_ENABLED) {
                port->state.alt = NULL;
                port->state.mode = TYPEC_STATE_USB;
                ret = typec_mux_set(port->mux, &port->state);
        } else {
                dev_dbg(typec->dev,
                        "Unrecognized mode requested, mux flags: %x\n",
-                       mux_flags);
+                       port->mux_flags);
        }
 
 mux_ack:
@@ -630,17 +650,6 @@ static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
        }
 }
 
-static int cros_typec_get_mux_info(struct cros_typec_data *typec, int port_num,
-                                  struct ec_response_usb_pd_mux_info *resp)
-{
-       struct ec_params_usb_pd_mux_info req = {
-               .port = port_num,
-       };
-
-       return cros_ec_command(typec->ec, 0, EC_CMD_USB_PD_MUX_INFO, &req,
-                              sizeof(req), resp, sizeof(*resp));
-}
-
 /*
  * Helper function to register partner/plug altmodes.
  */
@@ -938,7 +947,6 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
 {
        struct ec_params_usb_pd_control req;
        struct ec_response_usb_pd_control_v2 resp;
-       struct ec_response_usb_pd_mux_info mux_resp;
        int ret;
 
        if (port_num < 0 || port_num >= typec->num_ports) {
@@ -958,6 +966,11 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
        if (ret < 0)
                return ret;
 
+       /* Update the switches if they exist, according to requested state */
+       ret = cros_typec_configure_mux(typec, port_num, &resp);
+       if (ret)
+               dev_warn(typec->dev, "Configure muxes failed, err = %d\n", ret);
+
        dev_dbg(typec->dev, "Enabled %d: 0x%hhx\n", port_num, resp.enabled);
        dev_dbg(typec->dev, "Role %d: 0x%hhx\n", port_num, resp.role);
        dev_dbg(typec->dev, "Polarity %d: 0x%hhx\n", port_num, resp.polarity);
@@ -973,27 +986,7 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
        if (typec->typec_cmd_supported)
                cros_typec_handle_status(typec, port_num);
 
-       /* Update the switches if they exist, according to requested state */
-       ret = cros_typec_get_mux_info(typec, port_num, &mux_resp);
-       if (ret < 0) {
-               dev_warn(typec->dev,
-                        "Failed to get mux info for port: %d, err = %d\n",
-                        port_num, ret);
-               return 0;
-       }
-
-       /* No change needs to be made, let's exit early. */
-       if (typec->ports[port_num]->mux_flags == mux_resp.flags &&
-           typec->ports[port_num]->role == resp.role)
-               return 0;
-
-       typec->ports[port_num]->mux_flags = mux_resp.flags;
-       typec->ports[port_num]->role = resp.role;
-       ret = cros_typec_configure_mux(typec, port_num, mux_resp.flags, &resp);
-       if (ret)
-               dev_warn(typec->dev, "Configure muxes failed, err = %d\n", ret);
-
-       return ret;
+       return 0;
 }
 
 static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
@@ -1075,7 +1068,13 @@ static int cros_typec_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        typec->dev = dev;
+
        typec->ec = dev_get_drvdata(pdev->dev.parent);
+       if (!typec->ec) {
+               dev_err(dev, "couldn't find parent EC device\n");
+               return -ENODEV;
+       }
+
        platform_set_drvdata(pdev, typec);
 
        ret = cros_typec_get_cmd_version(typec);
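
Besides the mux rework, the typec probe gains a defensive check: dev_get_drvdata() on the parent returns NULL when the parent device has not populated its driver data, and without the check the first EC command would dereference a NULL pointer. The pattern, sketched with an assumed parent-data type:

static int example_probe(struct platform_device *pdev)
{
	struct cros_ec_dev *ec;	/* parent-data type assumed here */

	ec = dev_get_drvdata(pdev->dev.parent);
	if (!ec) {
		dev_err(&pdev->dev, "couldn't find parent EC device\n");
		return -ENODEV;	/* fail probe instead of crashing later */
	}

	platform_set_drvdata(pdev, ec);
	return 0;
}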
index 6b8b3ab8db486c483ce146bfdfeb8bc5f216fc46..3463629f87640a2efcaf477a731e5a9058a4fe14 100644 (file)
@@ -584,21 +584,6 @@ static struct platform_driver acerhdf_driver = {
        .remove = acerhdf_remove,
 };
 
-/* checks if str begins with start */
-static int str_starts_with(const char *str, const char *start)
-{
-       unsigned long str_len = 0, start_len = 0;
-
-       str_len = strlen(str);
-       start_len = strlen(start);
-
-       if (str_len >= start_len &&
-                       !strncmp(str, start, start_len))
-               return 1;
-
-       return 0;
-}
-
 /* check hardware */
 static int __init acerhdf_check_hardware(void)
 {
@@ -651,9 +636,9 @@ static int __init acerhdf_check_hardware(void)
                 * check if actual hardware BIOS vendor, product and version
                 * IDs start with the strings of BIOS table entry
                 */
-               if (str_starts_with(vendor, bt->vendor) &&
-                               str_starts_with(product, bt->product) &&
-                               str_starts_with(version, bt->version)) {
+               if (strstarts(vendor, bt->vendor) &&
+                   strstarts(product, bt->product) &&
+                   strstarts(version, bt->version)) {
                        found = 1;
                        break;
                }
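
The acerhdf hunks replace a hand-rolled prefix test with strstarts() from <linux/string.h>, which performs the same strlen-bounded strncmp comparison. A usage sketch (the helper below and its prefixes are illustrative, not from the driver):

#include <linux/string.h>

/* true iff every field begins with its BIOS-table prefix */
static bool bios_entry_matches(const char *vendor, const char *product,
			       const char *version, const char *v_pfx,
			       const char *p_pfx, const char *ver_pfx)
{
	return strstarts(vendor, v_pfx) &&
	       strstarts(product, p_pfx) &&
	       strstarts(version, ver_pfx);
}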
index e9d0dbbb28870d2f103c79a8a125709a6e332819..fa4123dbdf7ff6a407f488c86c130fba1bcf771b 100644 (file)
@@ -160,8 +160,10 @@ MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
 
 static struct amd_pmc_dev pmc;
 static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
-static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
 static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
+#ifdef CONFIG_SUSPEND
+static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
+#endif
 
 static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
 {
@@ -325,6 +327,7 @@ static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table
        return 0;
 }
 
+#ifdef CONFIG_SUSPEND
 static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
 {
        struct smu_metrics table;
@@ -338,6 +341,7 @@ static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
                dev_dbg(pdev->dev, "Last suspend in deepest state for %lluus\n",
                         table.timein_s0i3_lastcapture);
 }
+#endif
 
 #ifdef CONFIG_DEBUG_FS
 static int smu_fw_info_show(struct seq_file *s, void *unused)
@@ -569,6 +573,7 @@ out_unlock:
        return rc;
 }
 
+#ifdef CONFIG_SUSPEND
 static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
 {
        switch (dev->cpu_id) {
@@ -694,6 +699,7 @@ static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
        .prepare = amd_pmc_s2idle_prepare,
        .restore = amd_pmc_s2idle_restore,
 };
+#endif
 
 static const struct pci_device_id pmc_pci_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
@@ -733,6 +739,7 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
        return 0;
 }
 
+#ifdef CONFIG_SUSPEND
 static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
 {
        int err;
@@ -753,6 +760,7 @@ static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
 
        return 0;
 }
+#endif
 
 static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
 {
@@ -859,9 +867,11 @@ static int amd_pmc_probe(struct platform_device *pdev)
 
        amd_pmc_get_smu_version(dev);
        platform_set_drvdata(pdev, dev);
+#ifdef CONFIG_SUSPEND
        err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
        if (err)
                dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n");
+#endif
 
        amd_pmc_dbgfs_register(dev);
        return 0;
@@ -875,7 +885,9 @@ static int amd_pmc_remove(struct platform_device *pdev)
 {
        struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
 
+#ifdef CONFIG_SUSPEND
        acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops);
+#endif
        amd_pmc_dbgfs_unregister(dev);
        pci_dev_put(dev->rdev);
        mutex_destroy(&dev->lock);
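
The amd_pmc hunks wrap every suspend-only symbol in #ifdef CONFIG_SUSPEND: on kernels built without suspend support the s2idle callbacks are never registered, and leaving the helpers compiled in would trigger defined-but-unused warnings (and the LPS0 registration calls may not even be available there). The shape of the fix, with placeholder names:

#ifdef CONFIG_SUSPEND
static int example_suspend_helper(void)
{
	return 0;	/* built only when suspend support exists */
}
#endif

static int example_probe(void)
{
#ifdef CONFIG_SUSPEND
	/* call sites are guarded the same way as the definitions */
	if (example_suspend_helper())
		return -EIO;
#endif
	return 0;
}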
index f5c72e33f9ae340b5e2aa75d2cd3694e5bfa19e4..05534287bc26b65db179ab0e29f83508ae5e4b87 100644 (file)
@@ -10,7 +10,6 @@
 
 #define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
 
-#include <linux/io.h>
 #include <linux/delay.h>
 #include <linux/dmi.h>
 #include <linux/err.h>
index c1d9ed9b7b672d45b30a4671113a105001a2eb1b..19f6b456234f8bd5443b38ceba4189c5c1ba7d03 100644 (file)
@@ -1121,8 +1121,6 @@ static void kbd_led_set(struct led_classdev *led_cdev,
 
        if (value > samsung->kbd_led.max_brightness)
                value = samsung->kbd_led.max_brightness;
-       else if (value < 0)
-               value = 0;
 
        samsung->kbd_led_wk = value;
        queue_work(samsung->led_workqueue, &samsung->kbd_led_work);
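
The samsung-laptop deletion removes a branch that could never run: the brightness value handed to the LED callback carries no negative values, so after clamping to max_brightness an additional `value < 0` test is dead code, and compilers flag such comparisons on unsigned types under -Wtype-limits. Illustration:

static unsigned int clamp_brightness(unsigned int value, unsigned int max)
{
	if (value > max)
		value = max;
	/* "else if (value < 0) value = 0;" would be dead code here:
	 * value is unsigned, so the comparison is always false */
	return value;
}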
index bce17ca9794742ee002b9d63d8d06a03cc56bfec..a01a92769c1a367ac207b7d7a55b52060130468d 100644 (file)
@@ -740,16 +740,8 @@ static ssize_t certificate_store(struct kobject *kobj,
        if (!tlmi_priv.certificate_support)
                return -EOPNOTSUPP;
 
-       new_cert = kstrdup(buf, GFP_KERNEL);
-       if (!new_cert)
-               return -ENOMEM;
-       /* Strip out CR if one is present */
-       strip_cr(new_cert);
-
        /* If empty then clear installed certificate */
-       if (new_cert[0] == '\0') { /* Clear installed certificate */
-               kfree(new_cert);
-
+       if ((buf[0] == '\0') || (buf[0] == '\n')) { /* Clear installed certificate */
                /* Check that signature is set */
                if (!setting->signature || !setting->signature[0])
                        return -EACCES;
@@ -763,14 +755,16 @@ static ssize_t certificate_store(struct kobject *kobj,
 
                ret = tlmi_simple_call(LENOVO_CLEAR_BIOS_CERT_GUID, auth_str);
                kfree(auth_str);
-               if (ret)
-                       return ret;
 
-               kfree(setting->certificate);
-               setting->certificate = NULL;
-               return count;
+               return ret ?: count;
        }
 
+       new_cert = kstrdup(buf, GFP_KERNEL);
+       if (!new_cert)
+               return -ENOMEM;
+       /* Strip out CR if one is present */
+       strip_cr(new_cert);
+
        if (setting->cert_installed) {
                /* Certificate is installed so this is an update */
                if (!setting->signature || !setting->signature[0]) {
@@ -792,21 +786,14 @@ static ssize_t certificate_store(struct kobject *kobj,
                auth_str = kasprintf(GFP_KERNEL, "%s,%s",
                                new_cert, setting->password);
        }
-       if (!auth_str) {
-               kfree(new_cert);
+       kfree(new_cert);
+       if (!auth_str)
                return -ENOMEM;
-       }
 
        ret = tlmi_simple_call(guid, auth_str);
        kfree(auth_str);
-       if (ret) {
-               kfree(new_cert);
-               return ret;
-       }
 
-       kfree(setting->certificate);
-       setting->certificate = new_cert;
-       return count;
+       return ret ?: count;
 }
 
 static struct kobj_attribute auth_certificate = __ATTR_WO(certificate);
@@ -1194,6 +1181,10 @@ static void tlmi_release_attr(void)
 
        kset_unregister(tlmi_priv.attribute_kset);
 
+       /* Free up any saved signatures */
+       kfree(tlmi_priv.pwd_admin->signature);
+       kfree(tlmi_priv.pwd_admin->save_signature);
+
        /* Authentication structures */
        sysfs_remove_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group);
        kobject_put(&tlmi_priv.pwd_admin->kobj);
@@ -1210,11 +1201,6 @@ static void tlmi_release_attr(void)
        }
 
        kset_unregister(tlmi_priv.authentication_kset);
-
-       /* Free up any saved certificates/signatures */
-       kfree(tlmi_priv.pwd_admin->certificate);
-       kfree(tlmi_priv.pwd_admin->signature);
-       kfree(tlmi_priv.pwd_admin->save_signature);
 }
 
 static int tlmi_sysfs_init(void)
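
The certificate_store() rework also reorders the logic so nothing is allocated before the clear-certificate early path, frees new_cert as soon as the auth string is built, and collapses each tail into `return ret ?: count;`. That is the GNU conditional with an omitted middle operand, common in kernel code: `a ?: b` evaluates a once and yields a if it is non-zero, otherwise b. The long form of the same return:

if (ret)
	return ret;	/* propagate the WMI call's error code */
return count;		/* success: report the bytes consumed */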
index 4f69df6eed07d0fad2adad2868a6141a926a19ca..4daba6151cd670bea54342769942c649ddd36c08 100644 (file)
@@ -63,7 +63,6 @@ struct tlmi_pwd_setting {
        int index; /*Used for HDD and NVME auth */
        enum level_option level;
        bool cert_installed;
-       char *certificate;
        char *signature;
        char *save_signature;
 };
index ea02c8dcd7484a1652238a1dc739ebb02056a56b..d925cb137e1268f38cc91b5b7988dd6273beac18 100644 (file)
@@ -604,6 +604,12 @@ int power_supply_get_battery_info(struct power_supply *psy,
        err = samsung_sdi_battery_get_info(&psy->dev, value, &info);
        if (!err)
                goto out_ret_pointer;
+       else if (err == -ENODEV)
+               /*
+                * Device does not have a static battery.
+                * Proceed to look for a simple battery.
+                */
+               err = 0;
 
        if (strcmp("simple-battery", value)) {
                err = -ENODEV;
index 9d59f277f51984188027eaac00cfe0360f044166..b33daab798b98a93260f94c20a1608ad0cabf54f 100644 (file)
@@ -824,6 +824,7 @@ static struct samsung_sdi_battery samsung_sdi_batteries[] = {
                        .constant_charge_current_max_ua = 900000,
                        .constant_charge_voltage_max_uv = 4200000,
                        .charge_term_current_ua = 200000,
+                       .charge_restart_voltage_uv = 4170000,
                        .maintenance_charge = samsung_maint_charge_table,
                        .maintenance_charge_size = ARRAY_SIZE(samsung_maint_charge_table),
                        .alert_low_temp_charge_current_ua = 300000,
@@ -867,6 +868,7 @@ static struct samsung_sdi_battery samsung_sdi_batteries[] = {
                        .constant_charge_current_max_ua = 1500000,
                        .constant_charge_voltage_max_uv = 4350000,
                        .charge_term_current_ua = 120000,
+                       .charge_restart_voltage_uv = 4300000,
                        .maintenance_charge = samsung_maint_charge_table,
                        .maintenance_charge_size = ARRAY_SIZE(samsung_maint_charge_table),
                        .alert_low_temp_charge_current_ua = 300000,
index 05147d2c384289e0bb90fb513c252c01d4907569..485e58b264c044fffa3cd569dc2f89c6f54e5aa8 100644 (file)
@@ -292,6 +292,7 @@ enum atc2603c_reg_ids {
        .bypass_mask = BIT(5), \
        .active_discharge_reg = ATC2603C_PMU_SWITCH_CTL, \
        .active_discharge_mask = BIT(1), \
+       .active_discharge_on = BIT(1), \
        .owner = THIS_MODULE, \
 }
 
index f21e3f8b21f23b54393eb5c6dc9ead9e61ced07e..8e13dea354a21e7ca69da47cca2a083d417ada72 100644 (file)
@@ -285,6 +285,7 @@ static const unsigned int rtq2134_buck_ramp_delay_table[] = {
                .enable_mask = RTQ2134_VOUTEN_MASK, \
                .active_discharge_reg = RTQ2134_REG_BUCK##_id##_CFG0, \
                .active_discharge_mask = RTQ2134_ACTDISCHG_MASK, \
+               .active_discharge_on = RTQ2134_ACTDISCHG_MASK, \
                .ramp_reg = RTQ2134_REG_BUCK##_id##_RSPCFG, \
                .ramp_mask = RTQ2134_RSPUP_MASK, \
                .ramp_delay_table = rtq2134_buck_ramp_delay_table, \
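
Both regulator hunks fix the same omission in their descriptors: active_discharge_reg and active_discharge_mask were set, but active_discharge_on was left at zero, so when the core tried to enable active discharge it wrote 0 into the masked field and discharge stayed permanently off. The relevant regulator_desc fields, sketched with a hypothetical register name:

static const struct regulator_desc example_desc = {
	.active_discharge_reg  = EXAMPLE_REG_SWITCH_CTL, /* hypothetical */
	.active_discharge_mask = BIT(1),
	/* value written to turn discharge ON; left at 0,
	 * "enable" writes 0 into the field and does nothing */
	.active_discharge_on   = BIT(1),
	/* .active_discharge_off defaults to 0 */
};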
index cadea0344486fa6e63eabb79cee8159ae0f067e5..40befdd9dfa922bf5878f2d46bc9bf6cec413223 100644 (file)
@@ -71,6 +71,35 @@ static const struct regulator_ops wm8994_ldo2_ops = {
 };
 
 static const struct regulator_desc wm8994_ldo_desc[] = {
+       {
+               .name = "LDO1",
+               .id = 1,
+               .type = REGULATOR_VOLTAGE,
+               .n_voltages = WM8994_LDO1_MAX_SELECTOR + 1,
+               .vsel_reg = WM8994_LDO_1,
+               .vsel_mask = WM8994_LDO1_VSEL_MASK,
+               .ops = &wm8994_ldo1_ops,
+               .min_uV = 2400000,
+               .uV_step = 100000,
+               .enable_time = 3000,
+               .off_on_delay = 36000,
+               .owner = THIS_MODULE,
+       },
+       {
+               .name = "LDO2",
+               .id = 2,
+               .type = REGULATOR_VOLTAGE,
+               .n_voltages = WM8994_LDO2_MAX_SELECTOR + 1,
+               .vsel_reg = WM8994_LDO_2,
+               .vsel_mask = WM8994_LDO2_VSEL_MASK,
+               .ops = &wm8994_ldo2_ops,
+               .enable_time = 3000,
+               .off_on_delay = 36000,
+               .owner = THIS_MODULE,
+       },
+};
+
+static const struct regulator_desc wm8958_ldo_desc[] = {
        {
                .name = "LDO1",
                .id = 1,
@@ -172,9 +201,16 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
         * regulator core and we need not worry about it on the
         * error path.
         */
-       ldo->regulator = devm_regulator_register(&pdev->dev,
-                                                &wm8994_ldo_desc[id],
-                                                &config);
+       if (ldo->wm8994->type == WM8994) {
+               ldo->regulator = devm_regulator_register(&pdev->dev,
+                                                        &wm8994_ldo_desc[id],
+                                                        &config);
+       } else {
+               ldo->regulator = devm_regulator_register(&pdev->dev,
+                                                        &wm8958_ldo_desc[id],
+                                                        &config);
+       }
+
        if (IS_ERR(ldo->regulator)) {
                ret = PTR_ERR(ldo->regulator);
                dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
index 1e831503885066b132faafd811d43837e3b72154..a8dde46063602dcc799db1e7bac5a3eef9fdf9e7 100644 (file)
@@ -121,7 +121,9 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
                return dev_err_probe(dev, PTR_ERR(priv->rstc),
                                     "failed to get reset\n");
 
-       reset_control_deassert(priv->rstc);
+       error = reset_control_deassert(priv->rstc);
+       if (error)
+               return error;
 
        priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops;
        priv->rcdev.of_reset_n_cells = 1;
index 24d3395964cc4ba2d3934a32299fef3f667cd45f..4c5bba52b10593890c9a95ccea929148db9cdc5f 100644 (file)
@@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
        struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
        struct mrq_reset_request request;
        struct tegra_bpmp_message msg;
+       int err;
 
        memset(&request, 0, sizeof(request));
        request.cmd = command;
@@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
        msg.tx.data = &request;
        msg.tx.size = sizeof(request);
 
-       return tegra_bpmp_transfer(bpmp, &msg);
+       err = tegra_bpmp_transfer(bpmp, &msg);
+       if (err)
+               return err;
+       if (msg.rx.ret)
+               return -EINVAL;
+
+       return 0;
 }
 
 static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
index f6d6d4c26361e87c1cf9cfa0f3eec44b82b06979..41c65b4d2baf63b26b886b1f524697c027d9eabd 100644 (file)
@@ -1293,6 +1293,16 @@ config RTC_DRV_OPAL
          This driver can also be built as a module. If so, the module
          will be called rtc-opal.
 
+config RTC_DRV_OPTEE
+       tristate "OP-TEE based RTC driver"
+       depends on OPTEE
+       help
+         Select this to get support for OP-TEE based RTC control on SoCs
+         where the RTC is not accessible to the normal world (Linux).

+
+         This driver can also be built as a module. If so, the module
+         will be called rtc-optee.
+
 config RTC_DRV_ZYNQMP
        tristate "Xilinx Zynq Ultrascale+ MPSoC RTC"
        depends on OF && HAS_IOMEM
index e92f3e94324511895d8053e92800d2168c7fa17b..2d827d8261d55892f99c1ba891208bcdb8f2ce3b 100644 (file)
@@ -115,6 +115,7 @@ obj-$(CONFIG_RTC_DRV_GAMECUBE)      += rtc-gamecube.o
 obj-$(CONFIG_RTC_DRV_NTXEC)    += rtc-ntxec.o
 obj-$(CONFIG_RTC_DRV_OMAP)     += rtc-omap.o
 obj-$(CONFIG_RTC_DRV_OPAL)     += rtc-opal.o
+obj-$(CONFIG_RTC_DRV_OPTEE)    += rtc-optee.o
 obj-$(CONFIG_RTC_DRV_PALMAS)   += rtc-palmas.o
 obj-$(CONFIG_RTC_DRV_PCAP)     += rtc-pcap.o
 obj-$(CONFIG_RTC_DRV_PCF2123)  += rtc-pcf2123.o
index 4b460c61f1d8c478090d4681143f906652759bdb..3c8eec2218dfb478be5b3981a9a183a4708f4f99 100644 (file)
@@ -26,6 +26,15 @@ struct class *rtc_class;
 static void rtc_device_release(struct device *dev)
 {
        struct rtc_device *rtc = to_rtc_device(dev);
+       struct timerqueue_head *head = &rtc->timerqueue;
+       struct timerqueue_node *node;
+
+       mutex_lock(&rtc->ops_lock);
+       while ((node = timerqueue_getnext(head)))
+               timerqueue_del(head, node);
+       mutex_unlock(&rtc->ops_lock);
+
+       cancel_work_sync(&rtc->irqwork);
 
        ida_simple_remove(&rtc_ida, rtc->id);
        mutex_destroy(&rtc->ops_lock);
@@ -390,9 +399,6 @@ int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc)
        if (!rtc->ops->set_alarm)
                clear_bit(RTC_FEATURE_ALARM, rtc->features);
 
-       if (rtc->uie_unsupported)
-               clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
-
        if (rtc->ops->set_offset)
                set_bit(RTC_FEATURE_CORRECTION, rtc->features);
 
index d8e835798153730ff76531aee9c9ac9e90eaeb4c..9edd662c69ace4da09ef528e6b8216114987be9e 100644 (file)
@@ -804,9 +804,13 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
        struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
        struct rtc_time tm;
        ktime_t now;
+       int err;
+
+       err = __rtc_read_time(rtc, &tm);
+       if (err)
+               return err;
 
        timer->enabled = 1;
-       __rtc_read_time(rtc, &tm);
        now = rtc_tm_to_ktime(tm);
 
        /* Skip over expired timers */
@@ -820,7 +824,6 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
        trace_rtc_timer_enqueue(timer);
        if (!next || ktime_before(timer->node.expires, next->expires)) {
                struct rtc_wkalrm alarm;
-               int err;
 
                alarm.time = rtc_ktime_to_tm(timer->node.expires);
                alarm.enabled = 1;
index 336cb9aa5e336d74889343a7e53bcbeb269683fb..d51565bcc189690a754ba2879cc0b62c058e8bf2 100644 (file)
@@ -1955,7 +1955,7 @@ static int ds1307_probe(struct i2c_client *client,
                dev_info(ds1307->dev,
                         "'wakeup-source' is set, request for an IRQ is disabled!\n");
                /* We cannot support UIE mode if we do not have an IRQ line */
-               ds1307->rtc->uie_unsupported = 1;
+               clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, ds1307->rtc->features);
        }
 
        if (want_irq) {
index 75db7ab654a5ae163855bd8805af975962cc8d91..a24331ba8a5fc9bbce488b3e0f2810a6a6f90c2b 100644 (file)
@@ -1273,7 +1273,7 @@ ds1685_rtc_probe(struct platform_device *pdev)
 
        /* See if the platform doesn't support UIE. */
        if (pdata->uie_unsupported)
-               rtc_dev->uie_unsupported = 1;
+               clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc_dev->features);
 
        rtc->dev = rtc_dev;
 
@@ -1285,13 +1285,10 @@ ds1685_rtc_probe(struct platform_device *pdev)
         * there won't be an automatic way of notifying the kernel about it,
         * unless ctrlc is explicitly polled.
         */
-       if (!pdata->no_irq) {
-               ret = platform_get_irq(pdev, 0);
-               if (ret <= 0)
-                       return ret;
-
-               rtc->irq_num = ret;
-
+       rtc->irq_num = platform_get_irq(pdev, 0);
+       if (rtc->irq_num <= 0) {
+               clear_bit(RTC_FEATURE_ALARM, rtc_dev->features);
+       } else {
                /* Request an IRQ. */
                ret = devm_request_threaded_irq(&pdev->dev, rtc->irq_num,
                                       NULL, ds1685_rtc_irq_handler,
@@ -1305,7 +1302,6 @@ ds1685_rtc_probe(struct platform_device *pdev)
                        rtc->irq_num = 0;
                }
        }
-       rtc->no_irq = pdata->no_irq;
 
        /* Setup complete. */
        ds1685_rtc_switch_to_bank0(rtc);
@@ -1394,7 +1390,7 @@ ds1685_rtc_poweroff(struct platform_device *pdev)
                 * have been taken care of by the shutdown scripts and this
                 * is the final function call.
                 */
-               if (!rtc->no_irq)
+               if (rtc->irq_num)
                        disable_irq_nosync(rtc->irq_num);
 
                /* Oscillator must be on and the countdown chain enabled. */
index 138c5e0046c8dbcfe24a2a7bae295960d6f82941..11850c2880ad487d3e8b3306fe6057e766e40c1e 100644 (file)
@@ -261,15 +261,17 @@ static int __init efi_rtc_probe(struct platform_device *dev)
        if (efi.get_time(&eft, &cap) != EFI_SUCCESS)
                return -ENODEV;
 
-       rtc = devm_rtc_device_register(&dev->dev, "rtc-efi", &efi_rtc_ops,
-                                       THIS_MODULE);
+       rtc = devm_rtc_allocate_device(&dev->dev);
        if (IS_ERR(rtc))
                return PTR_ERR(rtc);
 
-       rtc->uie_unsupported = 1;
        platform_set_drvdata(dev, rtc);
 
-       return 0;
+       rtc->ops = &efi_rtc_ops;
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
+       set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, rtc->features);
+
+       return devm_rtc_register_device(rtc);
 }
 
 static struct platform_driver efi_rtc_driver = {
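
The rtc-efi conversion above is the template the surrounding RTC hunks follow: the core drops the legacy uie_unsupported flag, and drivers express capabilities on the rtc->features bitmap between devm_rtc_allocate_device() and devm_rtc_register_device(). A sketch of the pattern, with a hypothetical ops table:

static int example_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	rtc = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	rtc->ops = &example_rtc_ops;	/* hypothetical ops table */
	/* no usable IRQ line: update interrupts cannot be delivered */
	clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
	/* the hardware alarm only has minute resolution */
	set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);

	return devm_rtc_register_device(rtc);
}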
index f717b36f4738cf2513cf127c54653166a26d8056..18ca3b38b2d0474f58f4a56b3ae94d5afb3f86d9 100644 (file)
@@ -235,6 +235,7 @@ static int gamecube_rtc_read_offset_from_sram(struct priv *d)
        }
 
        ret = of_address_to_resource(np, 0, &res);
+       of_node_put(np);
        if (ret) {
                pr_err("no io memory range found\n");
                return -1;
index 0751cae2728595ad899031dcf7280cc65ec0f3d9..90e602e99d03a88448a71623ce41619959afed28 100644 (file)
@@ -220,24 +220,6 @@ static int hym8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
        u8 buf[4];
        int ret;
 
-       /*
-        * The alarm has no seconds so deal with it
-        */
-       if (alm_tm->tm_sec) {
-               alm_tm->tm_sec = 0;
-               alm_tm->tm_min++;
-               if (alm_tm->tm_min >= 60) {
-                       alm_tm->tm_min = 0;
-                       alm_tm->tm_hour++;
-                       if (alm_tm->tm_hour >= 24) {
-                               alm_tm->tm_hour = 0;
-                               alm_tm->tm_mday++;
-                               if (alm_tm->tm_mday > 31)
-                                       alm_tm->tm_mday = 0;
-                       }
-               }
-       }
-
        ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
        if (ret < 0)
                return ret;
@@ -523,6 +505,10 @@ static int hym8563_probe(struct i2c_client *client,
        if (!hym8563)
                return -ENOMEM;
 
+       hym8563->rtc = devm_rtc_allocate_device(&client->dev);
+       if (IS_ERR(hym8563->rtc))
+               return PTR_ERR(hym8563->rtc);
+
        hym8563->client = client;
        i2c_set_clientdata(client, hym8563);
 
@@ -557,19 +543,15 @@ static int hym8563_probe(struct i2c_client *client,
        dev_dbg(&client->dev, "rtc information is %s\n",
                (ret & HYM8563_SEC_VL) ? "invalid" : "valid");
 
-       hym8563->rtc = devm_rtc_device_register(&client->dev, client->name,
-                                               &hym8563_rtc_ops, THIS_MODULE);
-       if (IS_ERR(hym8563->rtc))
-               return PTR_ERR(hym8563->rtc);
-
-       /* the hym8563 alarm only supports a minute accuracy */
-       hym8563->rtc->uie_unsupported = 1;
+       hym8563->rtc->ops = &hym8563_rtc_ops;
+       set_bit(RTC_FEATURE_ALARM_RES_MINUTE, hym8563->rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, hym8563->rtc->features);
 
 #ifdef CONFIG_COMMON_CLK
        hym8563_clkout_register_clk(hym8563);
 #endif
 
-       return 0;
+       return devm_rtc_register_device(hym8563->rtc);
 }
 
 static const struct i2c_device_id hym8563_id[] = {
index 6d383b629d20a2e65a977468ca6ca36121ef4ece..d868458cd40ed05c200cfe456267df385a5886a5 100644 (file)
@@ -932,10 +932,8 @@ static int m41t80_probe(struct i2c_client *client,
        m41t80_data->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        m41t80_data->rtc->range_max = RTC_TIMESTAMP_END_2099;
 
-       if (client->irq <= 0) {
-               /* We cannot support UIE mode if we do not have an IRQ line */
-               m41t80_data->rtc->uie_unsupported = 1;
-       }
+       if (client->irq <= 0)
+               clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, m41t80_data->rtc->features);
 
        /* Make sure HT (Halt Update) bit is cleared */
        rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR);
index ae9f131b43c0c69586061bef1de46f56bbbf7f56..522449b25921efd0ac23f6d3c6bac0173654d3ba 100644 (file)
@@ -176,6 +176,17 @@ int mc146818_get_time(struct rtc_time *time)
 }
 EXPORT_SYMBOL_GPL(mc146818_get_time);
 
+/* AMD systems don't allow access to AltCentury with DV1 */
+static bool apply_amd_register_a_behavior(void)
+{
+#ifdef CONFIG_X86
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+           boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+               return true;
+#endif
+       return false;
+}
+
 /* Set the current date and time in the real time clock. */
 int mc146818_set_time(struct rtc_time *time)
 {
@@ -232,8 +243,10 @@ int mc146818_set_time(struct rtc_time *time)
        if (yrs >= 100)
                yrs -= 100;
 
-       if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
-           || RTC_ALWAYS_BCD) {
+       spin_lock_irqsave(&rtc_lock, flags);
+       save_control = CMOS_READ(RTC_CONTROL);
+       spin_unlock_irqrestore(&rtc_lock, flags);
+       if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
                sec = bin2bcd(sec);
                min = bin2bcd(min);
                hrs = bin2bcd(hrs);
@@ -247,7 +260,10 @@ int mc146818_set_time(struct rtc_time *time)
        save_control = CMOS_READ(RTC_CONTROL);
        CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
        save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-       CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+       if (apply_amd_register_a_behavior())
+               CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT);
+       else
+               CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
 
 #ifdef CONFIG_MACH_DECSTATION
        CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
index bb2ea9bc56f2c51cebdb238b3b5f327fc459169c..6d7656a75cae50cb8cf90071492201a5acc8bb2a 100644 (file)
@@ -210,20 +210,6 @@ static int mpc5121_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
        struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
        struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
 
-       /*
-        * the alarm has no seconds so deal with it
-        */
-       if (alarm->time.tm_sec) {
-               alarm->time.tm_sec = 0;
-               alarm->time.tm_min++;
-               if (alarm->time.tm_min >= 60) {
-                       alarm->time.tm_min = 0;
-                       alarm->time.tm_hour++;
-                       if (alarm->time.tm_hour >= 24)
-                               alarm->time.tm_hour = 0;
-               }
-       }
-
        alarm->time.tm_mday = -1;
        alarm->time.tm_mon = -1;
        alarm->time.tm_year = -1;
@@ -349,7 +335,8 @@ static int mpc5121_rtc_probe(struct platform_device *op)
        }
 
        rtc->rtc->ops = &mpc5200_rtc_ops;
-       rtc->rtc->uie_unsupported = 1;
+       set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtc->features);
        rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
        rtc->rtc->range_max = 65733206399ULL; /* 4052-12-31 23:59:59 */
 
index f8f49a969c231a333487a77ee395c81b9d1ea958..ad41aaf8a17f5397e8b535cd9e3c7c5e14683842 100644 (file)
@@ -250,7 +250,7 @@ static int opal_rtc_probe(struct platform_device *pdev)
        rtc->ops = &opal_rtc_ops;
        rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
        rtc->range_max = RTC_TIMESTAMP_END_9999;
-       rtc->uie_unsupported = 1;
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
 
        return devm_rtc_register_device(rtc);
 }
diff --git a/drivers/rtc/rtc-optee.c b/drivers/rtc/rtc-optee.c
new file mode 100644 (file)
index 0000000..9f8b5d4
--- /dev/null
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Microchip.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rtc.h>
+#include <linux/tee_drv.h>
+
+#define RTC_INFO_VERSION       0x1
+
+#define TA_CMD_RTC_GET_INFO            0x0
+#define TA_CMD_RTC_GET_TIME            0x1
+#define TA_CMD_RTC_SET_TIME            0x2
+#define TA_CMD_RTC_GET_OFFSET          0x3
+#define TA_CMD_RTC_SET_OFFSET          0x4
+
+#define TA_RTC_FEATURE_CORRECTION      BIT(0)
+
+struct optee_rtc_time {
+       u32 tm_sec;
+       u32 tm_min;
+       u32 tm_hour;
+       u32 tm_mday;
+       u32 tm_mon;
+       u32 tm_year;
+       u32 tm_wday;
+};
+
+struct optee_rtc_info {
+       u64 version;
+       u64 features;
+       struct optee_rtc_time range_min;
+       struct optee_rtc_time range_max;
+};
+
+/**
+ * struct optee_rtc - OP-TEE RTC private data
+ * @dev:               OP-TEE based RTC device.
+ * @ctx:               OP-TEE context handler.
+ * @session_id:                RTC TA session identifier.
+ * @shm:               Memory pool shared with RTC device.
+ * @features:          Bitfield of RTC features
+ */
+struct optee_rtc {
+       struct device *dev;
+       struct tee_context *ctx;
+       u32 session_id;
+       struct tee_shm *shm;
+       u64 features;
+};
+
+static int optee_rtc_readtime(struct device *dev, struct rtc_time *tm)
+{
+       struct optee_rtc *priv = dev_get_drvdata(dev);
+       struct tee_ioctl_invoke_arg inv_arg = {0};
+       struct optee_rtc_time *optee_tm;
+       struct tee_param param[4] = {0};
+       int ret;
+
+       inv_arg.func = TA_CMD_RTC_GET_TIME;
+       inv_arg.session = priv->session_id;
+       inv_arg.num_params = 4;
+
+       /* Fill invoke cmd params */
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+       param[0].u.memref.shm = priv->shm;
+       param[0].u.memref.size = sizeof(struct optee_rtc_time);
+
+       ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+       if (ret < 0 || inv_arg.ret != 0)
+               return ret ? ret : -EPROTO;
+
+       optee_tm = tee_shm_get_va(priv->shm, 0);
+       if (IS_ERR(optee_tm))
+               return PTR_ERR(optee_tm);
+
+       if (param[0].u.memref.size != sizeof(*optee_tm))
+               return -EPROTO;
+
+       tm->tm_sec = optee_tm->tm_sec;
+       tm->tm_min = optee_tm->tm_min;
+       tm->tm_hour = optee_tm->tm_hour;
+       tm->tm_mday = optee_tm->tm_mday;
+       tm->tm_mon = optee_tm->tm_mon;
+       tm->tm_year = optee_tm->tm_year - 1900;
+       tm->tm_wday = optee_tm->tm_wday;
+       tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
+
+       return 0;
+}
+
+static int optee_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+       struct optee_rtc *priv = dev_get_drvdata(dev);
+       struct tee_ioctl_invoke_arg inv_arg = {0};
+       struct tee_param param[4] = {0};
+       struct optee_rtc_time optee_tm;
+       void *rtc_data;
+       int ret;
+
+       optee_tm.tm_sec = tm->tm_sec;
+       optee_tm.tm_min = tm->tm_min;
+       optee_tm.tm_hour = tm->tm_hour;
+       optee_tm.tm_mday = tm->tm_mday;
+       optee_tm.tm_mon = tm->tm_mon;
+       optee_tm.tm_year = tm->tm_year + 1900;
+       optee_tm.tm_wday = tm->tm_wday;
+
+       inv_arg.func = TA_CMD_RTC_SET_TIME;
+       inv_arg.session = priv->session_id;
+       inv_arg.num_params = 4;
+
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+       param[0].u.memref.shm = priv->shm;
+       param[0].u.memref.size = sizeof(struct optee_rtc_time);
+
+       rtc_data = tee_shm_get_va(priv->shm, 0);
+       if (IS_ERR(rtc_data))
+               return PTR_ERR(rtc_data);
+
+       memcpy(rtc_data, &optee_tm, sizeof(struct optee_rtc_time));
+
+       ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+       if (ret < 0 || inv_arg.ret != 0)
+               return ret ? ret : -EPROTO;
+
+       return 0;
+}
+
+static int optee_rtc_readoffset(struct device *dev, long *offset)
+{
+       struct optee_rtc *priv = dev_get_drvdata(dev);
+       struct tee_ioctl_invoke_arg inv_arg = {0};
+       struct tee_param param[4] = {0};
+       int ret;
+
+       if (!(priv->features & TA_RTC_FEATURE_CORRECTION))
+               return -EOPNOTSUPP;
+
+       inv_arg.func = TA_CMD_RTC_GET_OFFSET;
+       inv_arg.session = priv->session_id;
+       inv_arg.num_params = 4;
+
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
+
+       ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+       if (ret < 0 || inv_arg.ret != 0)
+               return ret ? ret : -EPROTO;
+
+       *offset = param[0].u.value.a;
+
+       return 0;
+}
+
+static int optee_rtc_setoffset(struct device *dev, long offset)
+{
+       struct optee_rtc *priv = dev_get_drvdata(dev);
+       struct tee_ioctl_invoke_arg inv_arg = {0};
+       struct tee_param param[4] = {0};
+       int ret;
+
+       if (!(priv->features & TA_RTC_FEATURE_CORRECTION))
+               return -EOPNOTSUPP;
+
+       inv_arg.func = TA_CMD_RTC_SET_OFFSET;
+       inv_arg.session = priv->session_id;
+       inv_arg.num_params = 4;
+
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+       param[0].u.value.a = offset;
+
+       ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+       if (ret < 0 || inv_arg.ret != 0)
+               return ret ? ret : -EPROTO;
+
+       return 0;
+}
+
+static const struct rtc_class_ops optee_rtc_ops = {
+       .read_time      = optee_rtc_readtime,
+       .set_time       = optee_rtc_settime,
+       .set_offset     = optee_rtc_setoffset,
+       .read_offset    = optee_rtc_readoffset,
+};
+
+static int optee_rtc_read_info(struct device *dev, struct rtc_device *rtc,
+                              u64 *features)
+{
+       struct optee_rtc *priv = dev_get_drvdata(dev);
+       struct tee_ioctl_invoke_arg inv_arg = {0};
+       struct tee_param param[4] = {0};
+       struct optee_rtc_info *info;
+       struct optee_rtc_time *tm;
+       int ret;
+
+       inv_arg.func = TA_CMD_RTC_GET_INFO;
+       inv_arg.session = priv->session_id;
+       inv_arg.num_params = 4;
+
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+       param[0].u.memref.shm = priv->shm;
+       param[0].u.memref.size = sizeof(*info);
+
+       ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+       if (ret < 0 || inv_arg.ret != 0)
+               return ret ? ret : -EPROTO;
+
+       info = tee_shm_get_va(priv->shm, 0);
+       if (IS_ERR(info))
+               return PTR_ERR(info);
+
+       if (param[0].u.memref.size != sizeof(*info))
+               return -EPROTO;
+
+       if (info->version != RTC_INFO_VERSION)
+               return -EPROTO;
+
+       *features = info->features;
+
+       tm = &info->range_min;
+       rtc->range_min = mktime64(tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min,
+                                 tm->tm_sec);
+       tm = &info->range_max;
+       rtc->range_max = mktime64(tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min,
+                                 tm->tm_sec);
+
+       return 0;
+}
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+       if (ver->impl_id == TEE_IMPL_ID_OPTEE)
+               return 1;
+       else
+               return 0;
+}
+
+static int optee_rtc_probe(struct device *dev)
+{
+       struct tee_client_device *rtc_device = to_tee_client_device(dev);
+       struct tee_ioctl_open_session_arg sess_arg;
+       struct optee_rtc *priv;
+       struct rtc_device *rtc;
+       struct tee_shm *shm;
+       int ret, err;
+
+       memset(&sess_arg, 0, sizeof(sess_arg));
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       rtc = devm_rtc_allocate_device(dev);
+       if (IS_ERR(rtc))
+               return PTR_ERR(rtc);
+
+       /* Open context with TEE driver */
+       priv->ctx = tee_client_open_context(NULL, optee_ctx_match, NULL, NULL);
+       if (IS_ERR(priv->ctx))
+               return -ENODEV;
+
+       /* Open session with rtc Trusted App */
+       export_uuid(sess_arg.uuid, &rtc_device->id.uuid);
+       sess_arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
+
+       ret = tee_client_open_session(priv->ctx, &sess_arg, NULL);
+       if (ret < 0 || sess_arg.ret != 0) {
+               dev_err(dev, "tee_client_open_session failed, err: %x\n", sess_arg.ret);
+               err = -EINVAL;
+               goto out_ctx;
+       }
+       priv->session_id = sess_arg.session;
+
+       shm = tee_shm_alloc_kernel_buf(priv->ctx, sizeof(struct optee_rtc_info));
+       if (IS_ERR(shm)) {
+               dev_err(dev, "tee_shm_alloc_kernel_buf failed\n");
+               err = PTR_ERR(shm);
+               goto out_sess;
+       }
+
+       priv->shm = shm;
+       priv->dev = dev;
+       dev_set_drvdata(dev, priv);
+
+       rtc->ops = &optee_rtc_ops;
+
+       err = optee_rtc_read_info(dev, rtc, &priv->features);
+       if (err) {
+               dev_err(dev, "Failed to get RTC features from OP-TEE\n");
+               goto out_shm;
+       }
+
+       err = devm_rtc_register_device(rtc);
+       if (err)
+               goto out_shm;
+
+       /*
+        * We must clear this bit after registering because rtc_register_device
+        * will set it if it sees that .set_offset is provided.
+        */
+       if (!(priv->features & TA_RTC_FEATURE_CORRECTION))
+               clear_bit(RTC_FEATURE_CORRECTION, rtc->features);
+
+       return 0;
+
+out_shm:
+       tee_shm_free(priv->shm);
+out_sess:
+       tee_client_close_session(priv->ctx, priv->session_id);
+out_ctx:
+       tee_client_close_context(priv->ctx);
+
+       return err;
+}
+
+static int optee_rtc_remove(struct device *dev)
+{
+       struct optee_rtc *priv = dev_get_drvdata(dev);
+
+       tee_client_close_session(priv->ctx, priv->session_id);
+       tee_client_close_context(priv->ctx);
+
+       return 0;
+}
+
+static const struct tee_client_device_id optee_rtc_id_table[] = {
+       {UUID_INIT(0xf389f8c8, 0x845f, 0x496c,
+                  0x8b, 0xbe, 0xd6, 0x4b, 0xd2, 0x4c, 0x92, 0xfd)},
+       {}
+};
+
+MODULE_DEVICE_TABLE(tee, optee_rtc_id_table);
+
+static struct tee_client_driver optee_rtc_driver = {
+       .id_table       = optee_rtc_id_table,
+       .driver         = {
+               .name           = "optee_rtc",
+               .bus            = &tee_bus_type,
+               .probe          = optee_rtc_probe,
+               .remove         = optee_rtc_remove,
+       },
+};
+
+static int __init optee_rtc_mod_init(void)
+{
+       return driver_register(&optee_rtc_driver.driver);
+}
+
+static void __exit optee_rtc_mod_exit(void)
+{
+       driver_unregister(&optee_rtc_driver.driver);
+}
+
+module_init(optee_rtc_mod_init);
+module_exit(optee_rtc_mod_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");
+MODULE_DESCRIPTION("OP-TEE based RTC driver");
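
Every TA call in this new driver follows one invoke pattern: fill a struct
tee_ioctl_invoke_arg, describe up to four parameters, call
tee_client_invoke_func(), and map a nonzero TEE-level result to -EPROTO.
A minimal sketch of that pattern as a hypothetical helper for the
value-returning commands (the driver above deliberately open-codes it per
command):

static int optee_rtc_invoke_get_value(struct optee_rtc *priv, u32 func,
				      u64 *value)
{
	struct tee_ioctl_invoke_arg inv_arg = {0};
	struct tee_param param[4] = {0};
	int ret;

	inv_arg.func = func;			/* TA command ID */
	inv_arg.session = priv->session_id;	/* session opened in probe */
	inv_arg.num_params = 4;

	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;

	ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
	if (ret < 0 || inv_arg.ret != 0)
		return ret ? ret : -EPROTO;	/* transport vs. TA failure */

	*value = param[0].u.value.a;
	return 0;
}
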
index 7473e6c8a183bf84a20848b31e3e846b7ac3bc20..e13b5e695d06a725ef6edb18972dc95ac8c6d84e 100644 (file)
@@ -427,7 +427,8 @@ static int pcf2123_probe(struct spi_device *spi)
         * support to this driver to generate interrupts more than once
         * per minute.
         */
-       rtc->uie_unsupported = 1;
+       set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
        rtc->ops = &pcf2123_rtc_ops;
        rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        rtc->range_max = RTC_TIMESTAMP_END_2099;
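
This is the first of several identical conversions in this pull: the removed
rtc->uie_unsupported flag becomes two bits in the rtc->features bitmap.
Clearing RTC_FEATURE_UPDATE_INTERRUPT tells the core the hardware cannot raise
a 1 Hz update interrupt; RTC_FEATURE_ALARM_RES_MINUTE records that alarms only
resolve to whole minutes. A sketch of how a consumer of the bitmap reads them
back (the exact core-side call sites are an assumption here):

static void rtc_check_features(struct rtc_device *rtc, bool *emulate_uie,
			       bool *minute_alarms)
{
	/* rtc->features is an ordinary bitmap, tested with test_bit(). */
	*emulate_uie = !test_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
	*minute_alarms = test_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);
}
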
index 81a5b1f2e68c3bd121e715a4307ab3c9951426e9..63b275b014bd613a95b42dea4940f4d3c3035dfb 100644 (file)
@@ -374,7 +374,8 @@ static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127)
 static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 {
        struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
-       unsigned int buf[5], ctrl2;
+       u8 buf[5];
+       unsigned int ctrl2;
        int ret;
 
        ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);
@@ -655,13 +656,25 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
        pcf2127->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        pcf2127->rtc->range_max = RTC_TIMESTAMP_END_2099;
        pcf2127->rtc->set_start_time = true; /* Sets actual start to 1970 */
-       pcf2127->rtc->uie_unsupported = 1;
+       set_bit(RTC_FEATURE_ALARM_RES_2S, pcf2127->rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, pcf2127->rtc->features);
        clear_bit(RTC_FEATURE_ALARM, pcf2127->rtc->features);
 
        if (alarm_irq > 0) {
+               unsigned long flags;
+
+               /*
+                * If flags = 0, devm_request_threaded_irq() will use IRQ flags
+                * obtained from device tree.
+                */
+               if (dev_fwnode(dev))
+                       flags = 0;
+               else
+                       flags = IRQF_TRIGGER_LOW;
+
                ret = devm_request_threaded_irq(dev, alarm_irq, NULL,
                                                pcf2127_rtc_irq,
-                                               IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+                                               flags | IRQF_ONESHOT,
                                                dev_name(dev), dev);
                if (ret) {
                        dev_err(dev, "failed to request alarm irq\n");
index df2b072c394d8ccea8a137669708ebb45b27a5f9..9760824ec199e8dda5a583e0dddd45eb06d4a6ca 100644 (file)
@@ -616,7 +616,8 @@ static int pcf85063_probe(struct i2c_client *client)
        pcf85063->rtc->ops = &pcf85063_rtc_ops;
        pcf85063->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        pcf85063->rtc->range_max = RTC_TIMESTAMP_END_2099;
-       pcf85063->rtc->uie_unsupported = 1;
+       set_bit(RTC_FEATURE_ALARM_RES_2S, pcf85063->rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, pcf85063->rtc->features);
        clear_bit(RTC_FEATURE_ALARM, pcf85063->rtc->features);
 
        if (config->has_alarms && client->irq > 0) {
index c93acade720575449a6f1e43cafeaf2e9f1e2f40..b1b1943de84478f15198537fabd4d7335ea47964 100644 (file)
@@ -212,14 +212,6 @@ static int pcf8523_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
        if (err < 0)
                return err;
 
-       /* The alarm has no seconds, round up to nearest minute */
-       if (tm->time.tm_sec) {
-               time64_t alarm_time = rtc_tm_to_time64(&tm->time);
-
-               alarm_time += 60 - tm->time.tm_sec;
-               rtc_time64_to_tm(alarm_time, &tm->time);
-       }
-
        regs[0] = bin2bcd(tm->time.tm_min);
        regs[1] = bin2bcd(tm->time.tm_hour);
        regs[2] = bin2bcd(tm->time.tm_mday);
@@ -240,9 +232,9 @@ static int pcf8523_param_get(struct device *dev, struct rtc_param *param)
 {
        struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
        int ret;
+       u32 value;
 
        switch(param->param) {
-               u32 value;
 
        case RTC_PARAM_BACKUP_SWITCH_MODE:
                ret = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL3, &value);
@@ -279,9 +271,9 @@ static int pcf8523_param_get(struct device *dev, struct rtc_param *param)
 static int pcf8523_param_set(struct device *dev, struct rtc_param *param)
 {
        struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
+       u8 mode;
 
        switch(param->param) {
-               u8 mode;
        case RTC_PARAM_BACKUP_SWITCH_MODE:
                switch (param->uvalue) {
                case RTC_BSM_DISABLED:
@@ -450,7 +442,8 @@ static int pcf8523_probe(struct i2c_client *client,
        rtc->ops = &pcf8523_rtc_ops;
        rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        rtc->range_max = RTC_TIMESTAMP_END_2099;
-       rtc->uie_unsupported = 1;
+       set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
 
        if (client->irq > 0) {
                err = regmap_write(pcf8523->regmap, PCF8523_TMR_CLKOUT_CTRL, 0x38);
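
Beyond the feature-bit conversion, the two pcf8523 param hunks hoist `u32
value;` and `u8 mode;` out of the switch bodies. That is a correctness-adjacent
cleanup, not just style: a declaration placed between `switch (...) {` and the
first case label is bypassed by every jump into the block, so an initializer
there would never run (GCC diagnoses this with -Wswitch-unreachable). A
minimal illustration:

	switch (n) {
		int x = 1;	/* bypassed: control jumps straight to a
				 * case label, the initializer never runs */
	case 0:
		return x;	/* would read an uninitialized variable */
	default:
		return 0;
	}
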
index c8bddfb941290cfc89e1f8f3cdd8d10529f41446..9d06813e2e6d4b4f1863c7b1ca0f538fedb72761 100644 (file)
@@ -330,19 +330,6 @@ static int pcf8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
        unsigned char buf[4];
        int err;
 
-       /* The alarm has no seconds, round up to nearest minute */
-       if (tm->time.tm_sec) {
-               time64_t alarm_time = rtc_tm_to_time64(&tm->time);
-
-               alarm_time += 60 - tm->time.tm_sec;
-               rtc_time64_to_tm(alarm_time, &tm->time);
-       }
-
-       dev_dbg(dev, "%s, min=%d hour=%d wday=%d mday=%d "
-               "enabled=%d pending=%d\n", __func__,
-               tm->time.tm_min, tm->time.tm_hour, tm->time.tm_wday,
-               tm->time.tm_mday, tm->enabled, tm->pending);
-
        buf[0] = bin2bcd(tm->time.tm_min);
        buf[1] = bin2bcd(tm->time.tm_hour);
        buf[2] = bin2bcd(tm->time.tm_mday);
@@ -565,7 +552,8 @@ static int pcf8563_probe(struct i2c_client *client,
 
        pcf8563->rtc->ops = &pcf8563_rtc_ops;
        /* the pcf8563 alarm only supports a minute accuracy */
-       pcf8563->rtc->uie_unsupported = 1;
+       set_bit(RTC_FEATURE_ALARM_RES_MINUTE, pcf8563->rtc->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, pcf8563->rtc->features);
        pcf8563->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        pcf8563->rtc->range_max = RTC_TIMESTAMP_END_2099;
        pcf8563->rtc->set_start_time = true;
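
pcf8523 and pcf8563 (above) both delete the same open-coded "round up to the
nearest minute" block from their set_alarm paths. With
RTC_FEATURE_ALARM_RES_MINUTE now advertised, that adjustment can live in one
place; the dropped logic, recast as a centralized check (its core-side home is
my assumption):

	if (test_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features) &&
	    alarm->time.tm_sec) {
		time64_t t = rtc_tm_to_time64(&alarm->time);

		t += 60 - alarm->time.tm_sec;	/* next whole minute */
		rtc_time64_to_tm(t, &alarm->time);
	}
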
index e38ee8848385505743d900cb1eed6bbfc5ff54c5..bad6a5d9c6839ca70905e3d46286b9729c1fd435 100644 (file)
@@ -350,9 +350,6 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
                }
        }
 
-       if (!adev->irq[0])
-               clear_bit(RTC_FEATURE_ALARM, ldata->rtc->features);
-
        device_init_wakeup(&adev->dev, true);
        ldata->rtc = devm_rtc_allocate_device(&adev->dev);
        if (IS_ERR(ldata->rtc)) {
@@ -360,6 +357,9 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
                goto out;
        }
 
+       if (!adev->irq[0])
+               clear_bit(RTC_FEATURE_ALARM, ldata->rtc->features);
+
        ldata->rtc->ops = ops;
        ldata->rtc->range_min = vendor->range_min;
        ldata->rtc->range_max = vendor->range_max;
index 29a1c65661e99d21a4c11754bb94aa379baba181..dc6d1476baa597374b396a81cba382d027dbea20 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/rtc.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
+#include <linux/pm_wakeirq.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
@@ -83,7 +84,7 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
        const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
 
        if (!rtc_dd->allow_set_time)
-               return -EACCES;
+               return -ENODEV;
 
        secs = rtc_tm_to_time64(tm);
 
@@ -527,40 +528,28 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
                return rc;
        }
 
-       return devm_rtc_register_device(rtc_dd->rtc);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int pm8xxx_rtc_resume(struct device *dev)
-{
-       struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+       rc = devm_rtc_register_device(rtc_dd->rtc);
+       if (rc)
+               return rc;
 
-       if (device_may_wakeup(dev))
-               disable_irq_wake(rtc_dd->rtc_alarm_irq);
+       rc = dev_pm_set_wake_irq(&pdev->dev, rtc_dd->rtc_alarm_irq);
+       if (rc)
+               return rc;
 
        return 0;
 }
 
-static int pm8xxx_rtc_suspend(struct device *dev)
+static int pm8xxx_remove(struct platform_device *pdev)
 {
-       struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
-
-       if (device_may_wakeup(dev))
-               enable_irq_wake(rtc_dd->rtc_alarm_irq);
-
+       dev_pm_clear_wake_irq(&pdev->dev);
        return 0;
 }
-#endif
-
-static SIMPLE_DEV_PM_OPS(pm8xxx_rtc_pm_ops,
-                        pm8xxx_rtc_suspend,
-                        pm8xxx_rtc_resume);
 
 static struct platform_driver pm8xxx_rtc_driver = {
        .probe          = pm8xxx_rtc_probe,
+       .remove         = pm8xxx_remove,
        .driver = {
                .name           = "rtc-pm8xxx",
-               .pm             = &pm8xxx_rtc_pm_ops,
                .of_match_table = pm8xxx_id_table,
        },
 };
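
The pm8xxx rework trades hand-rolled suspend/resume callbacks for
dev_pm_set_wake_irq(): once the wake IRQ is registered, the PM core performs
the enable_irq_wake()/disable_irq_wake() pairing around system sleep itself,
so the dev_pm_ops can be deleted wholesale. The resulting shape, as a sketch
with hypothetical names:

#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>

static int foo_rtc_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	device_init_wakeup(&pdev->dev, true);
	/* From here on the PM core arms/disarms this IRQ across suspend. */
	return dev_pm_set_wake_irq(&pdev->dev, irq);
}

static int foo_rtc_remove(struct platform_device *pdev)
{
	dev_pm_clear_wake_irq(&pdev->dev);
	return 0;
}
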
index b4a520056b1ab482caead3d02b07fe05fd6dab44..d4777b01ab220a5eb940684567cfdb8027d97e34 100644 (file)
@@ -204,8 +204,10 @@ static int spear_rtc_read_time(struct device *dev, struct rtc_time *tm)
        /* we don't report wday/yday/isdst ... */
        rtc_wait_not_busy(config);
 
-       time = readl(config->ioaddr + TIME_REG);
-       date = readl(config->ioaddr + DATE_REG);
+       do {
+               time = readl(config->ioaddr + TIME_REG);
+               date = readl(config->ioaddr + DATE_REG);
+       } while (time == readl(config->ioaddr + TIME_REG));
        tm->tm_sec = (time >> SECOND_SHIFT) & SECOND_MASK;
        tm->tm_min = (time >> MINUTE_SHIFT) & MIN_MASK;
        tm->tm_hour = (time >> HOUR_SHIFT) & HOUR_MASK;
@@ -352,6 +354,10 @@ static int spear_rtc_probe(struct platform_device *pdev)
        if (!config)
                return -ENOMEM;
 
+       config->rtc = devm_rtc_allocate_device(&pdev->dev);
+       if (IS_ERR(config->rtc))
+               return PTR_ERR(config->rtc);
+
        /* alarm irqs */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
@@ -380,16 +386,13 @@ static int spear_rtc_probe(struct platform_device *pdev)
        spin_lock_init(&config->lock);
        platform_set_drvdata(pdev, config);
 
-       config->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
-                                       &spear_rtc_ops, THIS_MODULE);
-       if (IS_ERR(config->rtc)) {
-               dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
-                               PTR_ERR(config->rtc));
-               status = PTR_ERR(config->rtc);
-               goto err_disable_clock;
-       }
+       config->rtc->ops = &spear_rtc_ops;
+       config->rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
+       config->rtc->range_max = RTC_TIMESTAMP_END_9999;
 
-       config->rtc->uie_unsupported = 1;
+       status = devm_rtc_register_device(config->rtc);
+       if (status)
+               goto err_disable_clock;
 
        if (!device_can_wakeup(&pdev->dev))
                device_init_wakeup(&pdev->dev, 1);
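
The new do/while in spear_rtc_read_time() fixes a torn read: TIME and DATE are
separate registers, so the date could roll over between the two readl() calls
and pair the new date with the old time. Re-reading the first register and
retrying while it changed is the usual guard; generically (REG_A, REG_B and
base are hypothetical):

u32 a, b;

do {
	a = readl(base + REG_A);
	b = readl(base + REG_B);
} while (a != readl(base + REG_A));	/* raced an update: retry the pair */
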
index 711832c758aeaef535c42a9f8d0eb0811cddc75d..5b3e4da6340612f78eec1e691966b60cde6661ec 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
+#include <linux/clk/sunxi-ng.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/fs.h>
@@ -48,7 +49,8 @@
 
 /* Alarm 0 (counter) */
 #define SUN6I_ALRM_COUNTER                     0x0020
-#define SUN6I_ALRM_CUR_VAL                     0x0024
+/* This holds the remaining alarm seconds on older SoCs (current value) */
+#define SUN6I_ALRM_COUNTER_HMS                 0x0024
 #define SUN6I_ALRM_EN                          0x0028
 #define SUN6I_ALRM_EN_CNT_EN                   BIT(0)
 #define SUN6I_ALRM_IRQ_EN                      0x002c
 #define SUN6I_YEAR_MIN                         1970
 #define SUN6I_YEAR_OFF                         (SUN6I_YEAR_MIN - 1900)
 
+#define SECS_PER_DAY                           (24 * 3600ULL)
+
 /*
  * There are other differences between models, including:
  *
@@ -133,12 +137,15 @@ struct sun6i_rtc_clk_data {
        unsigned int has_auto_swt : 1;
 };
 
+#define RTC_LINEAR_DAY BIT(0)
+
 struct sun6i_rtc_dev {
        struct rtc_device *rtc;
        const struct sun6i_rtc_clk_data *data;
        void __iomem *base;
        int irq;
-       unsigned long alarm;
+       time64_t alarm;
+       unsigned long flags;
 
        struct clk_hw hw;
        struct clk_hw *int_osc;
@@ -363,23 +370,6 @@ CLK_OF_DECLARE_DRIVER(sun8i_h3_rtc_clk, "allwinner,sun8i-h3-rtc",
 CLK_OF_DECLARE_DRIVER(sun50i_h5_rtc_clk, "allwinner,sun50i-h5-rtc",
                      sun8i_h3_rtc_clk_init);
 
-static const struct sun6i_rtc_clk_data sun50i_h6_rtc_data = {
-       .rc_osc_rate = 16000000,
-       .fixed_prescaler = 32,
-       .has_prescaler = 1,
-       .has_out_clk = 1,
-       .export_iosc = 1,
-       .has_losc_en = 1,
-       .has_auto_swt = 1,
-};
-
-static void __init sun50i_h6_rtc_clk_init(struct device_node *node)
-{
-       sun6i_rtc_clk_init(node, &sun50i_h6_rtc_data);
-}
-CLK_OF_DECLARE_DRIVER(sun50i_h6_rtc_clk, "allwinner,sun50i-h6-rtc",
-                     sun50i_h6_rtc_clk_init);
-
 /*
  * The R40 user manual is self-conflicting on whether the prescaler is
  * fixed or configurable. The clock diagram shows it as fixed, but there
@@ -467,22 +457,30 @@ static int sun6i_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
        } while ((date != readl(chip->base + SUN6I_RTC_YMD)) ||
                 (time != readl(chip->base + SUN6I_RTC_HMS)));
 
+       if (chip->flags & RTC_LINEAR_DAY) {
+               /*
+                * Newer chips store a linear day number, the manual
+                * does not mandate any epoch base. The BSP driver uses
+                * the UNIX epoch, let's just copy that, as it's the
+                * easiest anyway.
+                */
+               rtc_time64_to_tm((date & 0xffff) * SECS_PER_DAY, rtc_tm);
+       } else {
+               rtc_tm->tm_mday = SUN6I_DATE_GET_DAY_VALUE(date);
+               rtc_tm->tm_mon  = SUN6I_DATE_GET_MON_VALUE(date) - 1;
+               rtc_tm->tm_year = SUN6I_DATE_GET_YEAR_VALUE(date);
+
+               /*
+                * switch from (data_year->min)-relative offset to
+                * a (1900)-relative one
+                */
+               rtc_tm->tm_year += SUN6I_YEAR_OFF;
+       }
+
        rtc_tm->tm_sec  = SUN6I_TIME_GET_SEC_VALUE(time);
        rtc_tm->tm_min  = SUN6I_TIME_GET_MIN_VALUE(time);
        rtc_tm->tm_hour = SUN6I_TIME_GET_HOUR_VALUE(time);
 
-       rtc_tm->tm_mday = SUN6I_DATE_GET_DAY_VALUE(date);
-       rtc_tm->tm_mon  = SUN6I_DATE_GET_MON_VALUE(date);
-       rtc_tm->tm_year = SUN6I_DATE_GET_YEAR_VALUE(date);
-
-       rtc_tm->tm_mon  -= 1;
-
-       /*
-        * switch from (data_year->min)-relative offset to
-        * a (1900)-relative one
-        */
-       rtc_tm->tm_year += SUN6I_YEAR_OFF;
-
        return 0;
 }
 
@@ -510,36 +508,54 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
        struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
        struct rtc_time *alrm_tm = &wkalrm->time;
        struct rtc_time tm_now;
-       unsigned long time_now = 0;
-       unsigned long time_set = 0;
-       unsigned long time_gap = 0;
-       int ret = 0;
-
-       ret = sun6i_rtc_gettime(dev, &tm_now);
-       if (ret < 0) {
-               dev_err(dev, "Error in getting time\n");
-               return -EINVAL;
-       }
+       time64_t time_set;
+       u32 counter_val, counter_val_hms;
+       int ret;
 
        time_set = rtc_tm_to_time64(alrm_tm);
-       time_now = rtc_tm_to_time64(&tm_now);
-       if (time_set <= time_now) {
-               dev_err(dev, "Date to set in the past\n");
-               return -EINVAL;
-       }
-
-       time_gap = time_set - time_now;
 
-       if (time_gap > U32_MAX) {
-               dev_err(dev, "Date too far in the future\n");
-               return -EINVAL;
+       if (chip->flags & RTC_LINEAR_DAY) {
+               /*
+                * The alarm registers hold the actual alarm time, encoded
+                * in the same way (linear day + HMS) as the current time.
+                */
+               counter_val_hms = SUN6I_TIME_SET_SEC_VALUE(alrm_tm->tm_sec)  |
+                                 SUN6I_TIME_SET_MIN_VALUE(alrm_tm->tm_min)  |
+                                 SUN6I_TIME_SET_HOUR_VALUE(alrm_tm->tm_hour);
+               /* The division will cut off the H:M:S part of alrm_tm. */
+               counter_val = div_u64(rtc_tm_to_time64(alrm_tm), SECS_PER_DAY);
+       } else {
+               /* The alarm register holds the number of seconds left. */
+               time64_t time_now;
+
+               ret = sun6i_rtc_gettime(dev, &tm_now);
+               if (ret < 0) {
+                       dev_err(dev, "Error in getting time\n");
+                       return -EINVAL;
+               }
+
+               time_now = rtc_tm_to_time64(&tm_now);
+               if (time_set <= time_now) {
+                       dev_err(dev, "Date to set in the past\n");
+                       return -EINVAL;
+               }
+               if ((time_set - time_now) > U32_MAX) {
+                       dev_err(dev, "Date too far in the future\n");
+                       return -EINVAL;
+               }
+
+               counter_val = time_set - time_now;
        }
 
        sun6i_rtc_setaie(0, chip);
        writel(0, chip->base + SUN6I_ALRM_COUNTER);
+       if (chip->flags & RTC_LINEAR_DAY)
+               writel(0, chip->base + SUN6I_ALRM_COUNTER_HMS);
        usleep_range(100, 300);
 
-       writel(time_gap, chip->base + SUN6I_ALRM_COUNTER);
+       writel(counter_val, chip->base + SUN6I_ALRM_COUNTER);
+       if (chip->flags & RTC_LINEAR_DAY)
+               writel(counter_val_hms, chip->base + SUN6I_ALRM_COUNTER_HMS);
        chip->alarm = time_set;
 
        sun6i_rtc_setaie(wkalrm->enabled, chip);
@@ -571,20 +587,25 @@ static int sun6i_rtc_settime(struct device *dev, struct rtc_time *rtc_tm)
        u32 date = 0;
        u32 time = 0;
 
-       rtc_tm->tm_year -= SUN6I_YEAR_OFF;
-       rtc_tm->tm_mon += 1;
-
-       date = SUN6I_DATE_SET_DAY_VALUE(rtc_tm->tm_mday) |
-               SUN6I_DATE_SET_MON_VALUE(rtc_tm->tm_mon)  |
-               SUN6I_DATE_SET_YEAR_VALUE(rtc_tm->tm_year);
-
-       if (is_leap_year(rtc_tm->tm_year + SUN6I_YEAR_MIN))
-               date |= SUN6I_LEAP_SET_VALUE(1);
-
        time = SUN6I_TIME_SET_SEC_VALUE(rtc_tm->tm_sec)  |
                SUN6I_TIME_SET_MIN_VALUE(rtc_tm->tm_min)  |
                SUN6I_TIME_SET_HOUR_VALUE(rtc_tm->tm_hour);
 
+       if (chip->flags & RTC_LINEAR_DAY) {
+               /* The division will cut off the H:M:S part of rtc_tm. */
+               date = div_u64(rtc_tm_to_time64(rtc_tm), SECS_PER_DAY);
+       } else {
+               rtc_tm->tm_year -= SUN6I_YEAR_OFF;
+               rtc_tm->tm_mon += 1;
+
+               date = SUN6I_DATE_SET_DAY_VALUE(rtc_tm->tm_mday) |
+                       SUN6I_DATE_SET_MON_VALUE(rtc_tm->tm_mon)  |
+                       SUN6I_DATE_SET_YEAR_VALUE(rtc_tm->tm_year);
+
+               if (is_leap_year(rtc_tm->tm_year + SUN6I_YEAR_MIN))
+                       date |= SUN6I_LEAP_SET_VALUE(1);
+       }
+
        /* Check whether registers are writable */
        if (sun6i_rtc_wait(chip, SUN6I_LOSC_CTRL,
                           SUN6I_LOSC_CTRL_ACC_MASK, 50)) {
@@ -668,11 +689,35 @@ static int sun6i_rtc_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(sun6i_rtc_pm_ops,
        sun6i_rtc_suspend, sun6i_rtc_resume);
 
+static void sun6i_rtc_bus_clk_cleanup(void *data)
+{
+       struct clk *bus_clk = data;
+
+       clk_disable_unprepare(bus_clk);
+}
+
 static int sun6i_rtc_probe(struct platform_device *pdev)
 {
        struct sun6i_rtc_dev *chip = sun6i_rtc;
+       struct device *dev = &pdev->dev;
+       struct clk *bus_clk;
        int ret;
 
+       bus_clk = devm_clk_get_optional(dev, "bus");
+       if (IS_ERR(bus_clk))
+               return PTR_ERR(bus_clk);
+
+       if (bus_clk) {
+               ret = clk_prepare_enable(bus_clk);
+               if (ret)
+                       return ret;
+
+               ret = devm_add_action_or_reset(dev, sun6i_rtc_bus_clk_cleanup,
+                                              bus_clk);
+               if (ret)
+                       return ret;
+       }
+
        if (!chip) {
                chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
                if (!chip)
@@ -683,10 +728,18 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
                chip->base = devm_platform_ioremap_resource(pdev, 0);
                if (IS_ERR(chip->base))
                        return PTR_ERR(chip->base);
+
+               if (IS_REACHABLE(CONFIG_SUN6I_RTC_CCU)) {
+                       ret = sun6i_rtc_ccu_probe(dev, chip->base);
+                       if (ret)
+                               return ret;
+               }
        }
 
        platform_set_drvdata(pdev, chip);
 
+       chip->flags = (unsigned long)of_device_get_match_data(&pdev->dev);
+
        chip->irq = platform_get_irq(pdev, 0);
        if (chip->irq < 0)
                return chip->irq;
@@ -733,7 +786,10 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
                return PTR_ERR(chip->rtc);
 
        chip->rtc->ops = &sun6i_rtc_ops;
-       chip->rtc->range_max = 2019686399LL; /* 2033-12-31 23:59:59 */
+       if (chip->flags & RTC_LINEAR_DAY)
+               chip->rtc->range_max = (65536 * SECS_PER_DAY) - 1;
+       else
+               chip->rtc->range_max = 2019686399LL; /* 2033-12-31 23:59:59 */
 
        ret = devm_rtc_register_device(chip->rtc);
        if (ret)
@@ -758,6 +814,8 @@ static const struct of_device_id sun6i_rtc_dt_ids[] = {
        { .compatible = "allwinner,sun8i-v3-rtc" },
        { .compatible = "allwinner,sun50i-h5-rtc" },
        { .compatible = "allwinner,sun50i-h6-rtc" },
+       { .compatible = "allwinner,sun50i-h616-rtc",
+               .data = (void *)RTC_LINEAR_DAY },
        { /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, sun6i_rtc_dt_ids);
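
For the new H616 entry (RTC_LINEAR_DAY) the day register holds a bare day
counter (per the comment in the gettime hunk, days since the Unix epoch,
matching the BSP), while hours/minutes/seconds stay in the packed HMS
register. The encoding that setalarm and settime each perform, gathered into
one sketch using the driver's existing field macros:

static void sun6i_encode_linear_day(time64_t secs, u32 *day, u32 *hms)
{
	u32 rem;

	*day = div_u64_rem(secs, 24 * 3600, &rem);	/* SECS_PER_DAY */
	*hms = SUN6I_TIME_SET_HOUR_VALUE(rem / 3600) |
	       SUN6I_TIME_SET_MIN_VALUE((rem / 60) % 60) |
	       SUN6I_TIME_SET_SEC_VALUE(rem % 60);
}
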
index 2018614f258f6f83cf9897897500022a49fd20ae..6eaa9321c074102c58fdf3aa8069a82936cab8be 100644 (file)
@@ -432,14 +432,21 @@ static int wm8350_rtc_probe(struct platform_device *pdev)
                return ret;
        }
 
-       wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
+       ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
                            wm8350_rtc_update_handler, 0,
                            "RTC Seconds", wm8350);
+       if (ret)
+               return ret;
+
        wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
 
-       wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
+       ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
                            wm8350_rtc_alarm_handler, 0,
                            "RTC Alarm", wm8350);
+       if (ret) {
+               wm8350_free_irq(wm8350, WM8350_IRQ_RTC_SEC, wm8350);
+               return ret;
+       }
 
        return 0;
 }
index cf68a9b1c9ebbc64cb37620ecc0331dfb2481d76..d3d0054e21fd02cf633b23d1071ad4a3450e19e5 100644 (file)
@@ -180,8 +180,6 @@ static int xgene_rtc_probe(struct platform_device *pdev)
                return ret;
        }
 
-       /* HW does not support update faster than 1 seconds */
-       pdata->rtc->uie_unsupported = 1;
        pdata->rtc->ops = &xgene_rtc_ops;
        pdata->rtc->range_max = U32_MAX;
 
index f0763e36b86168d6f79663303dd281fd0ec12ad3..cb24917619589e845a629dd82c75b99f3f5f9564 100644 (file)
@@ -745,9 +745,7 @@ sclp_sync_wait(void)
        /* Loop until driver state indicates finished request */
        while (sclp_running_state != sclp_running_state_idle) {
                /* Check for expired request timer */
-               if (timer_pending(&sclp_request_timer) &&
-                   get_tod_clock_fast() > timeout &&
-                   del_timer(&sclp_request_timer))
+               if (get_tod_clock_fast() > timeout && del_timer(&sclp_request_timer))
                        sclp_request_timer.function(&sclp_request_timer);
                cpu_relax();
        }
index de028868c6f4a4b5910c1518e8b46b293e95107b..fe5ee2646fcf14b14117e506f55ac8bb78d96d50 100644 (file)
@@ -109,8 +109,7 @@ static void sclp_console_sync_queue(void)
        unsigned long flags;
 
        spin_lock_irqsave(&sclp_con_lock, flags);
-       if (timer_pending(&sclp_con_timer))
-               del_timer(&sclp_con_timer);
+       del_timer(&sclp_con_timer);
        while (sclp_con_queue_running) {
                spin_unlock_irqrestore(&sclp_con_lock, flags);
                sclp_sync_wait();
index 7bc4e4a109372451e52782a124787c5165fb468f..3b4e7e5d9b71d1abd12c3a7dd5e54c88351da260 100644 (file)
@@ -231,8 +231,7 @@ sclp_vt220_emit_current(void)
                        list_add_tail(&sclp_vt220_current_request->list,
                                      &sclp_vt220_outqueue);
                        sclp_vt220_current_request = NULL;
-                       if (timer_pending(&sclp_vt220_timer))
-                               del_timer(&sclp_vt220_timer);
+                       del_timer(&sclp_vt220_timer);
                }
                sclp_vt220_flush_later = 0;
        }
@@ -776,8 +775,7 @@ static void __sclp_vt220_flush_buffer(void)
 
        sclp_vt220_emit_current();
        spin_lock_irqsave(&sclp_vt220_lock, flags);
-       if (timer_pending(&sclp_vt220_timer))
-               del_timer(&sclp_vt220_timer);
+       del_timer(&sclp_vt220_timer);
        while (sclp_vt220_queue_running) {
                spin_unlock_irqrestore(&sclp_vt220_lock, flags);
                sclp_sync_wait();
index 7ada994d459256612595c4fd96d03c268f8ea96f..38cc1565d6aee3db444d734fb6f0da353d26d7dc 100644 (file)
@@ -354,10 +354,10 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
        if ((
                sense[0] == SENSE_DATA_CHECK      ||
                sense[0] == SENSE_EQUIPMENT_CHECK ||
-               sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK
+               sense[0] == (SENSE_EQUIPMENT_CHECK | SENSE_DEFERRED_UNIT_CHECK)
        ) && (
                sense[1] == SENSE_DRIVE_ONLINE ||
-               sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE
+               sense[1] == (SENSE_BEGINNING_OF_TAPE | SENSE_WRITE_MODE)
        )) {
                switch (request->op) {
                /*
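
The tape_34xx change swaps '+' for '|' when composing sense flags. That is not
cosmetic: addition only coincides with bitwise OR while the operands share no
set bits, and a shared bit turns into a carry that yields a different mask
entirely. For example:

#define F_A 0x04
#define F_B 0x04		/* overlapping bit, for illustration */

int as_or  = F_A | F_B;		/* 0x04: still the intended mask */
int as_sum = F_A + F_B;		/* 0x08: a different flag altogether */
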
index 05e136cfb8be7226381570392d282d83aafad375..6d63b968309a8dba95b806718952ff504651641c 100644 (file)
@@ -113,16 +113,10 @@ ccw_device_timeout(struct timer_list *t)
 void
 ccw_device_set_timeout(struct ccw_device *cdev, int expires)
 {
-       if (expires == 0) {
+       if (expires == 0)
                del_timer(&cdev->private->timer);
-               return;
-       }
-       if (timer_pending(&cdev->private->timer)) {
-               if (mod_timer(&cdev->private->timer, jiffies + expires))
-                       return;
-       }
-       cdev->private->timer.expires = jiffies + expires;
-       add_timer(&cdev->private->timer);
+       else
+               mod_timer(&cdev->private->timer, jiffies + expires);
 }
 
 int
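
This hunk, the eadm one below it, and the sclp del_timer() cleanups above all
rest on the same documented timer semantics: del_timer() is a harmless no-op
on an inactive timer, and mod_timer() atomically activates an inactive timer
(returning 0) or re-arms a pending one (returning 1). The old
timer_pending()/add_timer() choreography therefore collapses to:

static void set_timeout(struct timer_list *timer, int expires)
{
	if (!expires)
		del_timer(timer);		     /* safe if not queued */
	else
		mod_timer(timer, jiffies + expires); /* arms or re-arms */
}
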
index 8b463681a14902f93a8aabbdf73556f4e4a1f512..ab6a7495180a40c37e7ec53377887e91f652c1e4 100644 (file)
@@ -112,16 +112,10 @@ static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
 {
        struct eadm_private *private = get_eadm_private(sch);
 
-       if (expires == 0) {
+       if (expires == 0)
                del_timer(&private->timer);
-               return;
-       }
-       if (timer_pending(&private->timer)) {
-               if (mod_timer(&private->timer, jiffies + expires))
-                       return;
-       }
-       private->timer.expires = jiffies + expires;
-       add_timer(&private->timer);
+       else
+               mod_timer(&private->timer, jiffies + expires);
 }
 
 static void eadm_subchannel_irq(struct subchannel *sch)
index 8fd5a17bdf994a0db39c58ab37e5e725c2b8a078..6a65885f5f43fcc821bd340dd214791569b66a6a 100644 (file)
@@ -315,6 +315,7 @@ struct ap_perms {
        unsigned long ioctlm[BITS_TO_LONGS(AP_IOCTLS)];
        unsigned long apm[BITS_TO_LONGS(AP_DEVICES)];
        unsigned long aqm[BITS_TO_LONGS(AP_DOMAINS)];
+       unsigned long adm[BITS_TO_LONGS(AP_DOMAINS)];
 };
 extern struct ap_perms ap_perms;
 extern struct mutex ap_perms_mutex;
index cf23ce1b1146564aea4ef32f0232f382c52de539..7f69ca695fc2a83392e727654668d1220969533e 100644 (file)
@@ -155,7 +155,7 @@ static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey)
        /*
         * The cca_xxx2protkey call may fail when a card has been
         * addressed where the master key was changed after last fetch
-        * of the mkvp into the cache. Try 3 times: First witout verify
+        * of the mkvp into the cache. Try 3 times: First without verify
         * then with verify and last round with verify and old master
         * key verification pattern match not ignored.
         */
index 7dc26365e29aa326322c2fd4c13c1fed016201f5..6e08d04b605d6e7389c28062b7ab5e0e81c3b97d 100644 (file)
@@ -1189,13 +1189,6 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
  * @matrix_mdev: a mediated matrix device
  * @kvm: reference to KVM instance
  *
- * Note: The matrix_dev->lock must be taken prior to calling
- * this function; however, the lock will be temporarily released while the
- * guest's AP configuration is set to avoid a potential lockdep splat.
- * The kvm->lock is taken to set the guest's AP configuration which, under
- * certain circumstances, will result in a circular lock dependency if this is
- * done under the @matrix_mdev->lock.
- *
  * Return: 0 if no other mediated matrix device has a reference to @kvm;
  * otherwise, returns an -EPERM.
  */
@@ -1269,18 +1262,11 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
  * by @matrix_mdev.
  *
  * @matrix_mdev: a matrix mediated device
- * @kvm: the pointer to the kvm structure being unset.
- *
- * Note: The matrix_dev->lock must be taken prior to calling
- * this function; however, the lock will be temporarily released while the
- * guest's AP configuration is cleared to avoid a potential lockdep splat.
- * The kvm->lock is taken to clear the guest's AP configuration which, under
- * certain circumstances, will result in a circular lock dependency if this is
- * done under the @matrix_mdev->lock.
  */
-static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev,
-                                  struct kvm *kvm)
+static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
 {
+       struct kvm *kvm = matrix_mdev->kvm;
+
        if (kvm && kvm->arch.crypto.crycbd) {
                down_write(&kvm->arch.crypto.pqap_hook_rwsem);
                kvm->arch.crypto.pqap_hook = NULL;
@@ -1311,7 +1297,7 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
        matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
 
        if (!data)
-               vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm);
+               vfio_ap_mdev_unset_kvm(matrix_mdev);
        else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
                notify_rc = NOTIFY_DONE;
 
@@ -1448,7 +1434,7 @@ static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
                                 &matrix_mdev->iommu_notifier);
        vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
                                 &matrix_mdev->group_notifier);
-       vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm);
+       vfio_ap_mdev_unset_kvm(matrix_mdev);
 }
 
 static int vfio_ap_mdev_get_device_info(unsigned long arg)
index 80e2a306709a20ca72d317602bd040cf78fc983a..aa6dc3c0c353d7eaf0594e0495189a3119d469f8 100644 (file)
@@ -285,10 +285,53 @@ static ssize_t aqmask_store(struct device *dev,
 
 static DEVICE_ATTR_RW(aqmask);
 
+static ssize_t admask_show(struct device *dev,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       int i, rc;
+       struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+       if (mutex_lock_interruptible(&ap_perms_mutex))
+               return -ERESTARTSYS;
+
+       buf[0] = '0';
+       buf[1] = 'x';
+       for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
+               snprintf(buf + 2 + 2 * i * sizeof(long),
+                        PAGE_SIZE - 2 - 2 * i * sizeof(long),
+                        "%016lx", zcdndev->perms.adm[i]);
+       buf[2 + 2 * i * sizeof(long)] = '\n';
+       buf[2 + 2 * i * sizeof(long) + 1] = '\0';
+       rc = 2 + 2 * i * sizeof(long) + 1;
+
+       mutex_unlock(&ap_perms_mutex);
+
+       return rc;
+}
+
+static ssize_t admask_store(struct device *dev,
+                           struct device_attribute *attr,
+                           const char *buf, size_t count)
+{
+       int rc;
+       struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+       rc = ap_parse_mask_str(buf, zcdndev->perms.adm,
+                              AP_DOMAINS, &ap_perms_mutex);
+       if (rc)
+               return rc;
+
+       return count;
+}
+
+static DEVICE_ATTR_RW(admask);
+
 static struct attribute *zcdn_dev_attrs[] = {
        &dev_attr_ioctlmask.attr,
        &dev_attr_apmask.attr,
        &dev_attr_aqmask.attr,
+       &dev_attr_admask.attr,
        NULL
 };
 
@@ -880,11 +923,22 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
        if (rc)
                goto out;
 
+       tdom = *domain;
+       if (perms != &ap_perms && tdom < AP_DOMAINS) {
+               if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
+                       if (!test_bit_inv(tdom, perms->adm)) {
+                               rc = -ENODEV;
+                               goto out;
+                       }
+               } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
+                       rc = -EOPNOTSUPP;
+                       goto out;
+               }
+       }
        /*
         * If a valid target domain is set and this domain is NOT a usage
         * domain but a control only domain, autoselect target domain.
         */
-       tdom = *domain;
        if (tdom < AP_DOMAINS &&
            !ap_test_config_usage_domain(tdom) &&
            ap_test_config_ctrl_domain(tdom))
@@ -1062,6 +1116,18 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
        if (rc)
                goto out_free;
 
+       if (perms != &ap_perms && domain < AUTOSEL_DOM) {
+               if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
+                       if (!test_bit_inv(domain, perms->adm)) {
+                               rc = -ENODEV;
+                               goto out_free;
+                       }
+               } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
+                       rc = -EOPNOTSUPP;
+                       goto out_free;
+               }
+       }
+
        pref_zc = NULL;
        pref_zq = NULL;
        spin_lock(&zcrypt_list_lock);
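
Both CPRB paths now gate administrative requests on the adm bitmap added to
struct ap_perms. Note the inverted bit helper: as I read the AP mask
convention, bits are numbered from the most significant end of each word,
hence test_bit_inv() rather than test_bit(). Condensed, the new gate is:

/* Admin requests need the target domain enabled in perms->adm;
 * everything else must at least be flagged as a usage request. */
if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
	if (!test_bit_inv(dom, perms->adm))
		return -ENODEV;
} else if (!(ap_msg.flags & AP_MSG_FLAG_USAGE)) {
	return -EOPNOTSUPP;
}
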
index 3e259befd30addb51154b53c89da64200dcbc757..fcbd537530e8469bdbf006145f03fe10d54fff64 100644 (file)
@@ -90,7 +90,7 @@ static ssize_t online_store(struct device *dev,
        list_for_each_entry(zq, &zc->zqueues, list)
                maxzqs++;
        if (maxzqs > 0)
-               zq_uelist = kcalloc(maxzqs + 1, sizeof(zq), GFP_ATOMIC);
+               zq_uelist = kcalloc(maxzqs + 1, sizeof(*zq_uelist), GFP_ATOMIC);
        list_for_each_entry(zq, &zc->zqueues, list)
                if (zcrypt_queue_force_online(zq, online))
                        if (zq_uelist) {
index 9ce5a71da69b822cfb8a59d3f4b8dce4c0233939..98d33f932b0bbb74b40bbdce81fd625f2d155643 100644 (file)
@@ -1109,7 +1109,7 @@ static int ep11_wrapkey(u16 card, u16 domain,
        if (kb->head.type == TOKTYPE_NON_CCA &&
            kb->head.version == TOKVER_EP11_AES) {
                has_header = true;
-               keysize = kb->head.len < keysize ? kb->head.len : keysize;
+               keysize = min_t(size_t, kb->head.len, keysize);
        }
 
        /* request cprb and payload */
index 5f554a3a0f626f54e9b20d6dd0af7deeb7b97973..caeebfb67149981a132c468bd56a9f8d35b12bce 100644 (file)
@@ -317,14 +317,18 @@ enum {
 };
 
 struct aha152x_cmd_priv {
-       struct scsi_pointer scsi_pointer;
+       char *ptr;
+       int this_residual;
+       struct scatterlist *buffer;
+       int status;
+       int message;
+       int sent_command;
+       int phase;
 };
 
-static struct scsi_pointer *aha152x_scsi_pointer(struct scsi_cmnd *cmd)
+static struct aha152x_cmd_priv *aha152x_priv(struct scsi_cmnd *cmd)
 {
-       struct aha152x_cmd_priv *acmd = scsi_cmd_priv(cmd);
-
-       return &acmd->scsi_pointer;
+       return scsi_cmd_priv(cmd);
 }
 
 MODULE_AUTHOR("Jürgen Fischer");
@@ -890,17 +894,16 @@ void aha152x_release(struct Scsi_Host *shpnt)
 static int setup_expected_interrupts(struct Scsi_Host *shpnt)
 {
        if(CURRENT_SC) {
-               struct scsi_pointer *scsi_pointer =
-                       aha152x_scsi_pointer(CURRENT_SC);
+               struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
 
-               scsi_pointer->phase |= 1 << 16;
+               acp->phase |= 1 << 16;
 
-               if (scsi_pointer->phase & selecting) {
+               if (acp->phase & selecting) {
                        SETPORT(SSTAT1, SELTO);
                        SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0));
                        SETPORT(SIMODE1, ENSELTIMO);
                } else {
-                       SETPORT(SIMODE0, (scsi_pointer->phase & spiordy) ? ENSPIORDY : 0);
+                       SETPORT(SIMODE0, (acp->phase & spiordy) ? ENSPIORDY : 0);
                        SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE);
                }
        } else if(STATE==seldi) {
@@ -924,17 +927,16 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
 static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
                                  struct completion *complete, int phase)
 {
-       struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(SCpnt);
+       struct aha152x_cmd_priv *acp = aha152x_priv(SCpnt);
        struct Scsi_Host *shpnt = SCpnt->device->host;
        unsigned long flags;
 
-       scsi_pointer->phase        = not_issued | phase;
-       scsi_pointer->Status       = 0x1; /* Ilegal status by SCSI standard */
-       scsi_pointer->Message      = 0;
-       scsi_pointer->have_data_in = 0;
-       scsi_pointer->sent_command = 0;
+       acp->phase        = not_issued | phase;
+       acp->status       = 0x1; /* Illegal status by SCSI standard */
+       acp->message      = 0;
+       acp->sent_command = 0;
 
-       if (scsi_pointer->phase & (resetting | check_condition)) {
+       if (acp->phase & (resetting | check_condition)) {
                if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) {
                        scmd_printk(KERN_ERR, SCpnt, "cannot reuse command\n");
                        return FAILED;
@@ -957,15 +959,15 @@ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
           SCp.phase            : current state of the command */
 
        if ((phase & resetting) || !scsi_sglist(SCpnt)) {
-               scsi_pointer->ptr           = NULL;
-               scsi_pointer->this_residual = 0;
+               acp->ptr           = NULL;
+               acp->this_residual = 0;
                scsi_set_resid(SCpnt, 0);
-               scsi_pointer->buffer        = NULL;
+               acp->buffer        = NULL;
        } else {
                scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
-               scsi_pointer->buffer        = scsi_sglist(SCpnt);
-               scsi_pointer->ptr           = SG_ADDRESS(scsi_pointer->buffer);
-               scsi_pointer->this_residual = scsi_pointer->buffer->length;
+               acp->buffer        = scsi_sglist(SCpnt);
+               acp->ptr           = SG_ADDRESS(acp->buffer);
+               acp->this_residual = acp->buffer->length;
        }
 
        DO_LOCK(flags);
@@ -1015,7 +1017,7 @@ static void reset_done(struct scsi_cmnd *SCpnt)
 
 static void aha152x_scsi_done(struct scsi_cmnd *SCpnt)
 {
-       if (aha152x_scsi_pointer(SCpnt)->phase & resetting)
+       if (aha152x_priv(SCpnt)->phase & resetting)
                reset_done(SCpnt);
        else
                scsi_done(SCpnt);
@@ -1101,7 +1103,7 @@ static int aha152x_device_reset(struct scsi_cmnd * SCpnt)
 
        DO_LOCK(flags);
 
-       if (aha152x_scsi_pointer(SCpnt)->phase & resetted) {
+       if (aha152x_priv(SCpnt)->phase & resetted) {
                HOSTDATA(shpnt)->commands--;
                if (!HOSTDATA(shpnt)->commands)
                        SETPORT(PORTA, 0);
@@ -1395,31 +1397,30 @@ static void busfree_run(struct Scsi_Host *shpnt)
        SETPORT(SSTAT1, CLRBUSFREE);
 
        if(CURRENT_SC) {
-               struct scsi_pointer *scsi_pointer =
-                       aha152x_scsi_pointer(CURRENT_SC);
+               struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
 
 #if defined(AHA152X_STAT)
                action++;
 #endif
-               scsi_pointer->phase &= ~syncneg;
+               acp->phase &= ~syncneg;
 
-               if (scsi_pointer->phase & completed) {
+               if (acp->phase & completed) {
                        /* target sent COMMAND COMPLETE */
-                       done(shpnt, scsi_pointer->Status, DID_OK);
+                       done(shpnt, acp->status, DID_OK);
 
-               } else if (scsi_pointer->phase & aborted) {
-                       done(shpnt, scsi_pointer->Status, DID_ABORT);
+               } else if (acp->phase & aborted) {
+                       done(shpnt, acp->status, DID_ABORT);
 
-               } else if (scsi_pointer->phase & resetted) {
-                       done(shpnt, scsi_pointer->Status, DID_RESET);
+               } else if (acp->phase & resetted) {
+                       done(shpnt, acp->status, DID_RESET);
 
-               } else if (scsi_pointer->phase & disconnected) {
+               } else if (acp->phase & disconnected) {
                        /* target sent DISCONNECT */
 #if defined(AHA152X_STAT)
                        HOSTDATA(shpnt)->disconnections++;
 #endif
                        append_SC(&DISCONNECTED_SC, CURRENT_SC);
-                       scsi_pointer->phase |= 1 << 16;
+                       acp->phase |= 1 << 16;
                        CURRENT_SC = NULL;
 
                } else {
@@ -1438,24 +1439,23 @@ static void busfree_run(struct Scsi_Host *shpnt)
                action++;
 #endif
 
-               if (aha152x_scsi_pointer(DONE_SC)->phase & check_condition) {
+               if (aha152x_priv(DONE_SC)->phase & check_condition) {
                        struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC;
                        struct aha152x_scdata *sc = SCDATA(cmd);
 
                        scsi_eh_restore_cmnd(cmd, &sc->ses);
 
-                       aha152x_scsi_pointer(cmd)->Status = SAM_STAT_CHECK_CONDITION;
+                       aha152x_priv(cmd)->status = SAM_STAT_CHECK_CONDITION;
 
                        HOSTDATA(shpnt)->commands--;
                        if (!HOSTDATA(shpnt)->commands)
                                SETPORT(PORTA, 0);      /* turn led off */
-               } else if (aha152x_scsi_pointer(DONE_SC)->Status ==
-                          SAM_STAT_CHECK_CONDITION) {
+               } else if (aha152x_priv(DONE_SC)->status == SAM_STAT_CHECK_CONDITION) {
 #if defined(AHA152X_STAT)
                        HOSTDATA(shpnt)->busfree_with_check_condition++;
 #endif
 
-                       if(!(aha152x_scsi_pointer(DONE_SC)->phase & not_issued)) {
+                       if (!(aha152x_priv(DONE_SC)->phase & not_issued)) {
                                struct aha152x_scdata *sc;
                                struct scsi_cmnd *ptr = DONE_SC;
                                DONE_SC=NULL;
@@ -1480,7 +1480,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
                        if (!HOSTDATA(shpnt)->commands)
                                SETPORT(PORTA, 0);      /* turn led off */
 
-                       if (!(aha152x_scsi_pointer(ptr)->phase & resetting)) {
+                       if (!(aha152x_priv(ptr)->phase & resetting)) {
                                kfree(ptr->host_scribble);
                                ptr->host_scribble=NULL;
                        }
@@ -1503,13 +1503,12 @@ static void busfree_run(struct Scsi_Host *shpnt)
        DO_UNLOCK(flags);
 
        if(CURRENT_SC) {
-               struct scsi_pointer *scsi_pointer =
-                       aha152x_scsi_pointer(CURRENT_SC);
+               struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
 
 #if defined(AHA152X_STAT)
                action++;
 #endif
-               scsi_pointer->phase |= selecting;
+               acp->phase |= selecting;
 
                /* clear selection timeout */
                SETPORT(SSTAT1, SELTO);
@@ -1537,13 +1536,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
  */
 static void seldo_run(struct Scsi_Host *shpnt)
 {
-       struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+       struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
 
        SETPORT(SCSISIG, 0);
        SETPORT(SSTAT1, CLRBUSFREE);
        SETPORT(SSTAT1, CLRPHASECHG);
 
-       scsi_pointer->phase &= ~(selecting | not_issued);
+       acp->phase &= ~(selecting | not_issued);
 
        SETPORT(SCSISEQ, 0);
 
@@ -1558,12 +1557,12 @@ static void seldo_run(struct Scsi_Host *shpnt)
 
        ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
 
-       if (scsi_pointer->phase & aborting) {
+       if (acp->phase & aborting) {
                ADDMSGO(ABORT);
-       } else if (scsi_pointer->phase & resetting) {
+       } else if (acp->phase & resetting) {
                ADDMSGO(BUS_DEVICE_RESET);
        } else if (SYNCNEG==0 && SYNCHRONOUS) {
-               scsi_pointer->phase |= syncneg;
+               acp->phase |= syncneg;
                MSGOLEN += spi_populate_sync_msg(&MSGO(MSGOLEN), 50, 8);
                SYNCNEG=1;              /* negotiation in progress */
        }
@@ -1578,7 +1577,7 @@ static void seldo_run(struct Scsi_Host *shpnt)
  */
 static void selto_run(struct Scsi_Host *shpnt)
 {
-       struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+       struct aha152x_cmd_priv *acp;
 
        SETPORT(SCSISEQ, 0);
        SETPORT(SSTAT1, CLRSELTIMO);
@@ -1586,9 +1585,10 @@ static void selto_run(struct Scsi_Host *shpnt)
        if (!CURRENT_SC)
                return;
 
-       scsi_pointer->phase &= ~selecting;
+       acp = aha152x_priv(CURRENT_SC);
+       acp->phase &= ~selecting;
 
-       if (scsi_pointer->phase & aborted)
+       if (acp->phase & aborted)
                done(shpnt, SAM_STAT_GOOD, DID_ABORT);
        else if (TESTLO(SSTAT0, SELINGO))
                done(shpnt, SAM_STAT_GOOD, DID_BUS_BUSY);
@@ -1616,10 +1616,9 @@ static void seldi_run(struct Scsi_Host *shpnt)
        SETPORT(SSTAT1, CLRPHASECHG);
 
        if(CURRENT_SC) {
-               struct scsi_pointer *scsi_pointer =
-                       aha152x_scsi_pointer(CURRENT_SC);
+               struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
 
-               if (!(scsi_pointer->phase & not_issued))
+               if (!(acp->phase & not_issued))
                        scmd_printk(KERN_ERR, CURRENT_SC,
                                    "command should not have been issued yet\n");
 
@@ -1676,7 +1675,7 @@ static void seldi_run(struct Scsi_Host *shpnt)
 static void msgi_run(struct Scsi_Host *shpnt)
 {
        for(;;) {
-               struct scsi_pointer *scsi_pointer;
+               struct aha152x_cmd_priv *acp;
                int sstat1 = GETPORT(SSTAT1);
 
                if(sstat1 & (PHASECHG|PHASEMIS|BUSFREE) || !(sstat1 & REQINIT))
@@ -1714,9 +1713,9 @@ static void msgi_run(struct Scsi_Host *shpnt)
                                continue;
                        }
 
-                       scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
-                       scsi_pointer->Message = MSGI(0);
-                       scsi_pointer->phase &= ~disconnected;
+                       acp = aha152x_priv(CURRENT_SC);
+                       acp->message = MSGI(0);
+                       acp->phase &= ~disconnected;
 
                        MSGILEN=0;
 
@@ -1724,8 +1723,8 @@ static void msgi_run(struct Scsi_Host *shpnt)
                        continue;
                }
 
-               scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
-               scsi_pointer->Message = MSGI(0);
+               acp = aha152x_priv(CURRENT_SC);
+               acp->message = MSGI(0);
 
                switch (MSGI(0)) {
                case DISCONNECT:
@@ -1733,11 +1732,11 @@ static void msgi_run(struct Scsi_Host *shpnt)
                                scmd_printk(KERN_WARNING, CURRENT_SC,
                                            "target was not allowed to disconnect\n");
 
-                       scsi_pointer->phase |= disconnected;
+                       acp->phase |= disconnected;
                        break;
 
                case COMMAND_COMPLETE:
-                       scsi_pointer->phase |= completed;
+                       acp->phase |= completed;
                        break;
 
                case MESSAGE_REJECT:
@@ -1867,11 +1866,9 @@ static void msgi_end(struct Scsi_Host *shpnt)
  */
 static void msgo_init(struct Scsi_Host *shpnt)
 {
-       struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
-
        if(MSGOLEN==0) {
-               if ((scsi_pointer->phase & syncneg) && SYNCNEG==2 &&
-                   SYNCRATE==0) {
+               if ((aha152x_priv(CURRENT_SC)->phase & syncneg) &&
+                   SYNCNEG == 2 && SYNCRATE == 0) {
                        ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
                } else {
                        scmd_printk(KERN_INFO, CURRENT_SC,
@@ -1888,7 +1885,7 @@ static void msgo_init(struct Scsi_Host *shpnt)
  */
 static void msgo_run(struct Scsi_Host *shpnt)
 {
-       struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+       struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
 
        while(MSGO_I<MSGOLEN) {
                if (TESTLO(SSTAT0, SPIORDY))
@@ -1901,13 +1898,13 @@ static void msgo_run(struct Scsi_Host *shpnt)
 
 
                if (MSGO(MSGO_I) & IDENTIFY_BASE)
-                       scsi_pointer->phase |= identified;
+                       acp->phase |= identified;
 
                if (MSGO(MSGO_I)==ABORT)
-                       scsi_pointer->phase |= aborted;
+                       acp->phase |= aborted;
 
                if (MSGO(MSGO_I)==BUS_DEVICE_RESET)
-                       scsi_pointer->phase |= resetted;
+                       acp->phase |= resetted;
 
                SETPORT(SCSIDAT, MSGO(MSGO_I++));
        }
@@ -1936,7 +1933,7 @@ static void msgo_end(struct Scsi_Host *shpnt)
  */
 static void cmd_init(struct Scsi_Host *shpnt)
 {
-       if (aha152x_scsi_pointer(CURRENT_SC)->sent_command) {
+       if (aha152x_priv(CURRENT_SC)->sent_command) {
                scmd_printk(KERN_ERR, CURRENT_SC,
                            "command already sent\n");
                done(shpnt, SAM_STAT_GOOD, DID_ERROR);
@@ -1967,7 +1964,7 @@ static void cmd_end(struct Scsi_Host *shpnt)
                            "command sent incompletely (%d/%d)\n",
                            CMD_I, CURRENT_SC->cmd_len);
        else
-               aha152x_scsi_pointer(CURRENT_SC)->sent_command++;
+               aha152x_priv(CURRENT_SC)->sent_command++;
 }
 
 /*
@@ -1979,7 +1976,7 @@ static void status_run(struct Scsi_Host *shpnt)
        if (TESTLO(SSTAT0, SPIORDY))
                return;
 
-       aha152x_scsi_pointer(CURRENT_SC)->Status = GETPORT(SCSIDAT);
+       aha152x_priv(CURRENT_SC)->status = GETPORT(SCSIDAT);
 
 }
 
@@ -2003,7 +2000,7 @@ static void datai_init(struct Scsi_Host *shpnt)
 
 static void datai_run(struct Scsi_Host *shpnt)
 {
-       struct scsi_pointer *scsi_pointer;
+       struct aha152x_cmd_priv *acp;
        unsigned long the_time;
        int fifodata, data_count;
 
@@ -2041,36 +2038,35 @@ static void datai_run(struct Scsi_Host *shpnt)
                        fifodata = GETPORT(FIFOSTAT);
                }
 
-               scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
-               if (scsi_pointer->this_residual > 0) {
-                       while (fifodata > 0 && scsi_pointer->this_residual > 0) {
-                               data_count = fifodata > scsi_pointer->this_residual ?
-                                               scsi_pointer->this_residual :
-                                               fifodata;
+               acp = aha152x_priv(CURRENT_SC);
+               if (acp->this_residual > 0) {
+                       while (fifodata > 0 && acp->this_residual > 0) {
+                               data_count = fifodata > acp->this_residual ?
+                                               acp->this_residual : fifodata;
                                fifodata -= data_count;
 
                                if (data_count & 1) {
                                        SETPORT(DMACNTRL0, ENDMA|_8BIT);
-                                       *scsi_pointer->ptr++ = GETPORT(DATAPORT);
-                                       scsi_pointer->this_residual--;
+                                       *acp->ptr++ = GETPORT(DATAPORT);
+                                       acp->this_residual--;
                                        DATA_LEN++;
                                        SETPORT(DMACNTRL0, ENDMA);
                                }
 
                                if (data_count > 1) {
                                        data_count >>= 1;
-                                       insw(DATAPORT, scsi_pointer->ptr, data_count);
-                                       scsi_pointer->ptr += 2 * data_count;
-                                       scsi_pointer->this_residual -= 2 * data_count;
+                                       insw(DATAPORT, acp->ptr, data_count);
+                                       acp->ptr += 2 * data_count;
+                                       acp->this_residual -= 2 * data_count;
                                        DATA_LEN += 2 * data_count;
                                }
 
-                               if (scsi_pointer->this_residual == 0 &&
-                                   !sg_is_last(scsi_pointer->buffer)) {
+                               if (acp->this_residual == 0 &&
+                                   !sg_is_last(acp->buffer)) {
                                        /* advance to next buffer */
-                                       scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
-                                       scsi_pointer->ptr           = SG_ADDRESS(scsi_pointer->buffer);
-                                       scsi_pointer->this_residual = scsi_pointer->buffer->length;
+                                       acp->buffer = sg_next(acp->buffer);
+                                       acp->ptr = SG_ADDRESS(acp->buffer);
+                                       acp->this_residual = acp->buffer->length;
                                }
                        }
                } else if (fifodata > 0) {
@@ -2138,15 +2134,15 @@ static void datao_init(struct Scsi_Host *shpnt)
 
 static void datao_run(struct Scsi_Host *shpnt)
 {
-       struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+       struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
        unsigned long the_time;
        int data_count;
 
        /* until phase changes or all data sent */
-       while (TESTLO(DMASTAT, INTSTAT) && scsi_pointer->this_residual > 0) {
+       while (TESTLO(DMASTAT, INTSTAT) && acp->this_residual > 0) {
                data_count = 128;
-               if (data_count > scsi_pointer->this_residual)
-                       data_count = scsi_pointer->this_residual;
+               if (data_count > acp->this_residual)
+                       data_count = acp->this_residual;
 
                if(TESTLO(DMASTAT, DFIFOEMP)) {
                        scmd_printk(KERN_ERR, CURRENT_SC,
@@ -2157,26 +2153,25 @@ static void datao_run(struct Scsi_Host *shpnt)
 
                if(data_count & 1) {
                        SETPORT(DMACNTRL0,WRITE_READ|ENDMA|_8BIT);
-                       SETPORT(DATAPORT, *scsi_pointer->ptr++);
-                       scsi_pointer->this_residual--;
+                       SETPORT(DATAPORT, *acp->ptr++);
+                       acp->this_residual--;
                        CMD_INC_RESID(CURRENT_SC, -1);
                        SETPORT(DMACNTRL0,WRITE_READ|ENDMA);
                }
 
                if(data_count > 1) {
                        data_count >>= 1;
-                       outsw(DATAPORT, scsi_pointer->ptr, data_count);
-                       scsi_pointer->ptr           += 2 * data_count;
-                       scsi_pointer->this_residual -= 2 * data_count;
+                       outsw(DATAPORT, acp->ptr, data_count);
+                       acp->ptr += 2 * data_count;
+                       acp->this_residual -= 2 * data_count;
                        CMD_INC_RESID(CURRENT_SC, -2 * data_count);
                }
 
-               if (scsi_pointer->this_residual == 0 &&
-                   !sg_is_last(scsi_pointer->buffer)) {
+               if (acp->this_residual == 0 && !sg_is_last(acp->buffer)) {
                        /* advance to next buffer */
-                       scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
-                       scsi_pointer->ptr           = SG_ADDRESS(scsi_pointer->buffer);
-                       scsi_pointer->this_residual = scsi_pointer->buffer->length;
+                       acp->buffer = sg_next(acp->buffer);
+                       acp->ptr = SG_ADDRESS(acp->buffer);
+                       acp->this_residual = acp->buffer->length;
                }
 
                the_time=jiffies + 100*HZ;
@@ -2192,7 +2187,7 @@ static void datao_run(struct Scsi_Host *shpnt)
 
 static void datao_end(struct Scsi_Host *shpnt)
 {
-       struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+       struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
 
        if(TESTLO(DMASTAT, DFIFOEMP)) {
                u32 datao_cnt = GETSTCNT();
@@ -2211,10 +2206,9 @@ static void datao_end(struct Scsi_Host *shpnt)
                        sg = sg_next(sg);
                }
 
-               scsi_pointer->buffer = sg;
-               scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer) + done;
-               scsi_pointer->this_residual = scsi_pointer->buffer->length -
-                       done;
+               acp->buffer = sg;
+               acp->ptr = SG_ADDRESS(acp->buffer) + done;
+               acp->this_residual = acp->buffer->length - done;
        }
 
        SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);
@@ -2229,7 +2223,6 @@ static void datao_end(struct Scsi_Host *shpnt)
  */
 static int update_state(struct Scsi_Host *shpnt)
 {
-       struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
        int dataphase=0;
        unsigned int stat0 = GETPORT(SSTAT0);
        unsigned int stat1 = GETPORT(SSTAT1);
@@ -2244,7 +2237,7 @@ static int update_state(struct Scsi_Host *shpnt)
        } else if (stat0 & SELDI && PREVSTATE == busfree) {
                STATE=seldi;
        } else if (stat0 & SELDO && CURRENT_SC &&
-                  (scsi_pointer->phase & selecting)) {
+                  (aha152x_priv(CURRENT_SC)->phase & selecting)) {
                STATE=seldo;
        } else if(stat1 & SELTO) {
                STATE=selto;
@@ -2376,8 +2369,7 @@ static void is_complete(struct Scsi_Host *shpnt)
                        SETPORT(SXFRCTL0, CH1);
                        SETPORT(DMACNTRL0, 0);
                        if(CURRENT_SC)
-                               aha152x_scsi_pointer(CURRENT_SC)->phase &=
-                                       ~spiordy;
+                               aha152x_priv(CURRENT_SC)->phase &= ~spiordy;
                }
 
                /*
@@ -2399,8 +2391,7 @@ static void is_complete(struct Scsi_Host *shpnt)
                        SETPORT(DMACNTRL0, 0);
                        SETPORT(SXFRCTL0, CH1|SPIOEN);
                        if(CURRENT_SC)
-                               aha152x_scsi_pointer(CURRENT_SC)->phase |=
-                                       spiordy;
+                               aha152x_priv(CURRENT_SC)->phase |= spiordy;
                }
 
                /*
@@ -2490,7 +2481,7 @@ static void disp_enintr(struct Scsi_Host *shpnt)
  */
 static void show_command(struct scsi_cmnd *ptr)
 {
-       const int phase = aha152x_scsi_pointer(ptr)->phase;
+       const int phase = aha152x_priv(ptr)->phase;
 
        scsi_print_command(ptr);
        scmd_printk(KERN_DEBUG, ptr,
@@ -2538,8 +2529,8 @@ static void show_queues(struct Scsi_Host *shpnt)
 
 static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
 {
-       struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(ptr);
-       const int phase = scsi_pointer->phase;
+       struct aha152x_cmd_priv *acp = aha152x_priv(ptr);
+       const int phase = acp->phase;
        int i;
 
        seq_printf(m, "%p: target=%d; lun=%d; cmnd=( ",
@@ -2549,8 +2540,8 @@ static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
                seq_printf(m, "0x%02x ", ptr->cmnd[i]);
 
        seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |",
-               scsi_get_resid(ptr), scsi_pointer->this_residual,
-               sg_nents(scsi_pointer->buffer) - 1);
+               scsi_get_resid(ptr), acp->this_residual,
+               sg_nents(acp->buffer) - 1);
 
        if (phase & not_issued)
                seq_puts(m, "not issued|");
index 679a4fd138746e406d95877043c6fdb0c668c035..793fe19993a90e4b71a121fe4ab55d5fa8596e52 100644 (file)
@@ -420,8 +420,6 @@ ahd_unlock(struct ahd_softc *ahd, unsigned long *flags)
 
 /* config registers for header type 0 devices */
 #define PCIR_MAPS      0x10
-#define PCIR_SUBVEND_0 0x2c
-#define PCIR_SUBDEV_0  0x2e
 
 /****************************** PCI-X definitions *****************************/
 #define PCIXR_COMMAND  0x96
index 2f0bdb9225a40183a0045c3a2d598e3230f7c851..5fad41b1ab58d40bd28a0ecff1e75fdd5f515687 100644 (file)
@@ -260,8 +260,8 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
 
        vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
        device = ahd_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
-       subvendor = ahd_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
-       subdevice = ahd_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
+       subvendor = ahd_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
+       subdevice = ahd_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2);
        full_id = ahd_compose_id(device,
                                 vendor,
                                 subdevice,
@@ -298,7 +298,7 @@ ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
         * Record if this is an HP board.
         */
        subvendor = ahd_pci_read_config(ahd->dev_softc,
-                                       PCIR_SUBVEND_0, /*bytes*/2);
+                                       PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
        if (subvendor == SUBID_HP)
                ahd->flags |= AHD_HP_BOARD;
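
The aic79xx/aic7xxx hunks replace the drivers' private config-space offsets with the generic constants from <linux/pci_regs.h>; PCI_SUBSYSTEM_VENDOR_ID is 0x2c and PCI_SUBSYSTEM_ID is 0x2e, identical to the removed PCIR_SUBVEND_0/PCIR_SUBDEV_0, so behavior is unchanged. For illustration only, the same registers read through the core PCI accessors (pdev stands for any bound struct pci_dev, not a name from these drivers):

	#include <linux/pci.h>

	u16 subvendor, subdevice;

	/* offsets 0x2c and 0x2e in the type 0 configuration header */
	pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subvendor);
	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subdevice);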
 
index 4782a304e93cc420a469355a6e66a6f2dfd2f9fe..51d9f4de07346a83476414f358e6f57279f44734 100644 (file)
@@ -433,8 +433,6 @@ ahc_unlock(struct ahc_softc *ahc, unsigned long *flags)
 
 /* config registers for header type 0 devices */
 #define PCIR_MAPS      0x10
-#define PCIR_SUBVEND_0 0x2c
-#define PCIR_SUBDEV_0  0x2e
 
 typedef enum
 {
index dab3a6d12c4d22395822fb45e9fd4aba4de9493e..2d4c85426dc3eb28a93ef5036c158fb53b8feeae 100644 (file)
@@ -673,8 +673,8 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
 
        vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
        device = ahc_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
-       subvendor = ahc_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
-       subdevice = ahc_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
+       subvendor = ahc_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
+       subdevice = ahc_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2);
        full_id = ahc_compose_id(device, vendor, subdevice, subvendor);
 
        /*
index 0103f811cc252f9749b389bf95a0071ccb4dd1fd..77654438559864829fe0a0629472f93fb780ac5b 100644 (file)
@@ -1169,7 +1169,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
                ofld_kcqe->fcoe_conn_context_id);
        interface = tgt->port->priv;
        if (hba != interface->hba) {
-               printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
+               printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mismatch\n");
                goto ofld_cmpl_err;
        }
        /*
@@ -1226,12 +1226,12 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
         * and enable
         */
        if (tgt->context_id != context_id) {
-               printk(KERN_ERR PFX "context id mis-match\n");
+               printk(KERN_ERR PFX "context id mismatch\n");
                return;
        }
        interface = tgt->port->priv;
        if (hba != interface->hba) {
-               printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
+               printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mismatch\n");
                goto enbl_cmpl_err;
        }
        if (!ofld_kcqe->completion_status)
index 5521469ce678b51773bdf3e3e6b46ea2140519ac..6c864b093ac94aad8d8182807101f4740ae272a9 100644 (file)
@@ -1977,7 +1977,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
                if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
                        break;
 
-               if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) {
+               if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
                        if (nopin->op_code == ISCSI_OP_NOOP_IN &&
                            nopin->itt == (u16) RESERVED_ITT) {
                                printk(KERN_ALERT "bnx2i: Unsolicited "
@@ -2398,7 +2398,7 @@ static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
        }
 
        if (hba != ep->hba) {
-               printk(KERN_ALERT "conn destroy- error hba mis-match\n");
+               printk(KERN_ALERT "conn destroy- error hba mismatch\n");
                return;
        }
 
@@ -2432,7 +2432,7 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
        }
 
        if (hba != ep->hba) {
-               printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
+               printk(KERN_ALERT "ofld_cmpl: error hba mismatch\n");
                return;
        }
 
index fe86fd61a995c19c28063a7238efc6ea2af726e4..15fbd09baa943a6caf1651ceddd336b5a11b5649 100644 (file)
@@ -1721,7 +1721,7 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
                        struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
 
                        /* Must suspend all rx queue activity for this ep */
-                       set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+                       set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
                }
                /* CONN_DISCONNECT timeout may or may not be an issue depending
                 * on what transcribed in TCP layer, different targets behave
index 8c7d4dda4cf29944c5f73a487852b818112103e1..4365d52c6430e79db085e175e724c5d6eb34822f 100644 (file)
@@ -1634,11 +1634,11 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
        log_debug(1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p, conn 0x%p.\n", csk, conn);
 
-       if (unlikely(!conn || conn->suspend_rx)) {
+       if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
                log_debug(1 << CXGBI_DBG_PDU_RX,
-                       "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
+                       "csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n",
                        csk, conn, conn ? conn->id : 0xFF,
-                       conn ? conn->suspend_rx : 0xFF);
+                       conn ? conn->flags : 0xFF);
                return;
        }
 
index 461ef8a76c4ce700d432ea1e5054bcbe673227ef..4bda2f6cb3526f0bf16e41f08b69c34e6a4050ee 100644 (file)
@@ -442,7 +442,6 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
        case SAS_PROTOCOL_INTERNAL_ABORT:
                hisi_sas_task_prep_abort(hisi_hba, slot);
                break;
-       fallthrough;
        default:
                return;
        }
index 80238e6a3c9839294b438e7f74e1fb4602c36ca5..eee1a24f7e15e8c8ac6700d5972b8b439823fe9f 100644 (file)
@@ -36,7 +36,7 @@
 
 #define IBMVSCSIS_VERSION      "v0.2"
 
-#define        INITIAL_SRP_LIMIT       800
+#define        INITIAL_SRP_LIMIT       1024
 #define        DEFAULT_MAX_SECTORS     256
 #define MAX_TXU                        1024 * 1024
 
index d690d9cf7eb15a325920a57672a200a2530ad4d1..35589b6af90d6e40896b2b5979bbb8cad6a749db 100644 (file)
@@ -413,7 +413,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
                                dev_warn(&ihost->pdev->dev,
                                         "%s: SCIC Controller 0x%p received "
                                         "event 0x%x for io request object "
-                                        "that doesnt exist.\n",
+                                        "that doesn't exist.\n",
                                         __func__,
                                         ihost,
                                         ent);
@@ -428,7 +428,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
                                dev_warn(&ihost->pdev->dev,
                                         "%s: SCIC Controller 0x%p received "
                                         "event 0x%x for remote device object "
-                                        "that doesnt exist.\n",
+                                        "that doesn't exist.\n",
                                         __func__,
                                         ihost,
                                         ent);
@@ -462,7 +462,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
                } else
                        dev_err(&ihost->pdev->dev,
                                "%s: SCIC Controller 0x%p received event 0x%x "
-                               "for remote device object 0x%0x that doesnt "
+                               "for remote device object 0x%0x that doesn't "
                                "exist.\n",
                                __func__,
                                ihost,
index d09926e6c8a86ef225c21f8e7ee2929f9134840c..797abf4f53995a124c0c48b586f93f9feb505d25 100644 (file)
@@ -678,7 +678,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
        struct iscsi_task *task;
        itt_t itt;
 
-       if (session->state == ISCSI_STATE_TERMINATE)
+       if (session->state == ISCSI_STATE_TERMINATE ||
+           !test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags))
                return NULL;
 
        if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
@@ -1392,8 +1393,8 @@ static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
        if (conn->stop_stage == 0)
                session->state = ISCSI_STATE_FAILED;
 
-       set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-       set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+       set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
+       set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
        return true;
 }
 
@@ -1454,7 +1455,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
         * Do this after dropping the extra ref because if this was a requeue
         * it's removed from that list and cleanup_queued_task would miss it.
         */
-       if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+       if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
                /*
                 * Save the task and ref in case we weren't cleaning up this
                 * task and get woken up again.
@@ -1532,7 +1533,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
        int rc = 0;
 
        spin_lock_bh(&conn->session->frwd_lock);
-       if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+       if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
                ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
                spin_unlock_bh(&conn->session->frwd_lock);
                return -ENODATA;
@@ -1746,7 +1747,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
                goto fault;
        }
 
-       if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+       if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
                reason = FAILURE_SESSION_IN_RECOVERY;
                sc->result = DID_REQUEUE << 16;
                goto fault;
@@ -1935,7 +1936,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
 void iscsi_suspend_queue(struct iscsi_conn *conn)
 {
        spin_lock_bh(&conn->session->frwd_lock);
-       set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+       set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
        spin_unlock_bh(&conn->session->frwd_lock);
 }
 EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
@@ -1953,7 +1954,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
        struct Scsi_Host *shost = conn->session->host;
        struct iscsi_host *ihost = shost_priv(shost);
 
-       set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+       set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
        if (ihost->workq)
                flush_workqueue(ihost->workq);
 }
@@ -1961,7 +1962,7 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
 
 static void iscsi_start_tx(struct iscsi_conn *conn)
 {
-       clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+       clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
        iscsi_conn_queue_work(conn);
 }
 
@@ -2214,6 +2215,8 @@ void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
        iscsi_suspend_tx(conn);
 
        spin_lock_bh(&session->frwd_lock);
+       clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
+
        if (!is_active) {
                /*
                 * if logout timed out before userspace could even send a PDU
@@ -3045,7 +3048,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
        if (!cls_conn)
                return NULL;
        conn = cls_conn->dd_data;
-       memset(conn, 0, sizeof(*conn) + dd_size);
 
        conn->dd_data = cls_conn->dd_data + sizeof(*conn);
        conn->session = session;
@@ -3318,6 +3320,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
        spin_lock_bh(&session->frwd_lock);
        if (is_leading)
                session->leadconn = conn;
+
+       set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
        spin_unlock_bh(&session->frwd_lock);
 
        /*
@@ -3330,8 +3334,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
        /*
         * Unblock xmitworker(), Login Phase will pass through.
         */
-       clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
-       clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+       clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
+       clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
        return 0;
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_bind);
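
The libiscsi hunks fold the old suspend_tx/suspend_rx words into a single conn->flags word driven by atomic bit operations, and introduce ISCSI_CONN_FLAG_BOUND so __iscsi_conn_send_pdu() rejects PDUs on a connection that was never bound or has been unbound. A sketch of the resulting life cycle, assuming the bit numbers are declared in include/scsi/libiscsi.h (that header is not part of the hunks shown here):

	/* bind: mark the conn usable and unblock both directions */
	set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
	clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
	clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);

	/* failure or unbind: quiesce tx/rx first, then drop BOUND */
	set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
	set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
	clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);

A single word also simplifies diagnostics, as in the cxgbi hunk above that now prints the whole flags word instead of one suspend field.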
index 2e9ffe3d1a55e7ccff3203947493a2183a5bd92f..883005757ddb82cdab6c74b46afd6a9586fd2e0e 100644 (file)
@@ -927,7 +927,7 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
         */
        conn->last_recv = jiffies;
 
-       if (unlikely(conn->suspend_rx)) {
+       if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
                ISCSI_DBG_TCP(conn, "Rx suspended!\n");
                *status = ISCSI_TCP_SUSPENDED;
                return 0;
index f0cf8ffdc5f3ea7f6c0ba38ca04146c2ed6e256a..0025760230e5182ffdeddb4e5a6aca470024394d 100644 (file)
@@ -897,6 +897,11 @@ enum lpfc_irq_chann_mode {
        NHT_MODE,
 };
 
+enum lpfc_hba_bit_flags {
+       FABRIC_COMANDS_BLOCKED,
+       HBA_PCI_ERR,
+};
+
 struct lpfc_hba {
        /* SCSI interface function jump table entries */
        struct lpfc_io_buf * (*lpfc_get_scsi_buf)
@@ -1043,7 +1048,6 @@ struct lpfc_hba {
                                         * Firmware supports Forced Link Speed
                                         * capability
                                         */
-#define HBA_PCI_ERR            0x80000 /* The PCI slot is offline */
 #define HBA_FLOGI_ISSUED       0x100000 /* FLOGI was issued */
 #define HBA_SHORT_CMF          0x200000 /* shorter CMF timer routine */
 #define HBA_CGN_DAY_WRAP       0x400000 /* HBA Congestion info day wraps */
@@ -1350,7 +1354,6 @@ struct lpfc_hba {
        atomic_t fabric_iocb_count;
        struct timer_list fabric_block_timer;
        unsigned long bit_flags;
-#define        FABRIC_COMANDS_BLOCKED  0
        atomic_t num_rsrc_err;
        atomic_t num_cmd_success;
        unsigned long last_rsrc_error_time;
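
The lpfc.h hunk moves HBA_PCI_ERR out of the hba_flag mask, where it was updated with plain |= and &= (racy against concurrent error callbacks), and makes it a bit number in the existing bit_flags word, next to the pre-existing FABRIC_COMANDS_BLOCKED bit (identifier spelling preserved from the source). The later lpfc_init.c hunks then lean on the atomic bitops, roughly:

	/* only the first context to see the frozen channel preps for reset */
	if (!test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags))
		lpfc_sli4_prep_dev_for_reset(phba);

	/* slot reset: learn whether the error path ever ran while clearing */
	if (!test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags))
		dev_info(&pdev->dev,
			 "hba_pci_err was not set, recovering slot reset.\n");

test_and_set_bit()/test_and_clear_bit() atomically return the previous bit value, which is exactly what a one-shot error handler needs and what a non-atomic read-modify-write on hba_flag could not guarantee.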
index 96408cd6c4c81668a68d31e5c7f6793711fe7b41..9897a1aa387b63a00a4e34fcd6975fb351fb3a39 100644 (file)
@@ -670,3 +670,6 @@ struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
                                              uint32_t hash, uint8_t *buf);
 void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport);
 int lpfc_issue_els_qfpa(struct lpfc_vport *vport);
+
+void lpfc_sli_rpi_release(struct lpfc_vport *vport,
+                         struct lpfc_nodelist *ndlp);
index 0144da30e3dbd0c5448d55e0fb71ad9126312143..2b877dff5ed4fb515602c227b69a53c7fe53fdd1 100644 (file)
@@ -109,8 +109,8 @@ lpfc_rport_invalid(struct fc_rport *rport)
 
        ndlp = rdata->pnode;
        if (!rdata->pnode) {
-               pr_err("**** %s: NULL ndlp on rport x%px SID x%x\n",
-                      __func__, rport, rport->scsi_target_id);
+               pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
+                       __func__, rport, rport->scsi_target_id);
                return -EINVAL;
        }
 
@@ -169,9 +169,10 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 
        lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
                         "3181 dev_loss_callbk x%06x, rport x%px flg x%x "
-                        "load_flag x%x refcnt %d\n",
+                        "load_flag x%x refcnt %d state %d xpt x%x\n",
                         ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
-                        vport->load_flag, kref_read(&ndlp->kref));
+                        vport->load_flag, kref_read(&ndlp->kref),
+                        ndlp->nlp_state, ndlp->fc4_xpt_flags);
 
        /* Don't schedule a worker thread event if the vport is going down.
         * The teardown process cleans up the node via lpfc_drop_node.
@@ -181,6 +182,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
                ndlp->rport = NULL;
 
                ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+               /* clear the NLP_XPT_REGD if the node is not registered
+                * with nvme-fc
+                */
+               if (ndlp->fc4_xpt_flags == NLP_XPT_REGD)
+                       ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
 
                /* Remove the node reference from remote_port_add now.
                 * The driver will not call remote_port_delete.
@@ -225,18 +231,36 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
        ndlp->rport = NULL;
        spin_unlock_irqrestore(&ndlp->lock, iflags);
 
-       /* We need to hold the node by incrementing the reference
-        * count until this queued work is done
-        */
-       evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+       if (phba->worker_thread) {
+               /* We need to hold the node by incrementing the reference
+                * count until this queued work is done
+                */
+               evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+
+               spin_lock_irqsave(&phba->hbalock, iflags);
+               if (evtp->evt_arg1) {
+                       evtp->evt = LPFC_EVT_DEV_LOSS;
+                       list_add_tail(&evtp->evt_listp, &phba->work_list);
+                       lpfc_worker_wake_up(phba);
+               }
+               spin_unlock_irqrestore(&phba->hbalock, iflags);
+       } else {
+               lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+                                "3188 worker thread is stopped %s x%06x, "
+                                " rport x%px flg x%x load_flag x%x refcnt "
+                                "%d\n", __func__, ndlp->nlp_DID,
+                                ndlp->rport, ndlp->nlp_flag,
+                                vport->load_flag, kref_read(&ndlp->kref));
+               if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
+                       spin_lock_irqsave(&ndlp->lock, iflags);
+                       /* Node is in dev loss.  No further transaction. */
+                       ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+                       spin_unlock_irqrestore(&ndlp->lock, iflags);
+                       lpfc_disc_state_machine(vport, ndlp, NULL,
+                                               NLP_EVT_DEVICE_RM);
+               }
 
-       spin_lock_irqsave(&phba->hbalock, iflags);
-       if (evtp->evt_arg1) {
-               evtp->evt = LPFC_EVT_DEV_LOSS;
-               list_add_tail(&evtp->evt_listp, &phba->work_list);
-               lpfc_worker_wake_up(phba);
        }
-       spin_unlock_irqrestore(&phba->hbalock, iflags);
 
        return;
 }
@@ -503,11 +527,12 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "0203 Devloss timeout on "
                                 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
-                                "NPort x%06x Data: x%x x%x x%x\n",
+                                "NPort x%06x Data: x%x x%x x%x refcnt %d\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID, ndlp->nlp_flag,
-                                ndlp->nlp_state, ndlp->nlp_rpi);
+                                ndlp->nlp_state, ndlp->nlp_rpi,
+                                kref_read(&ndlp->kref));
        } else {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
                                 "0204 Devloss timeout on "
@@ -755,18 +780,22 @@ lpfc_work_list_done(struct lpfc_hba *phba)
        int free_evt;
        int fcf_inuse;
        uint32_t nlp_did;
+       bool hba_pci_err;
 
        spin_lock_irq(&phba->hbalock);
        while (!list_empty(&phba->work_list)) {
                list_remove_head((&phba->work_list), evtp, typeof(*evtp),
                                 evt_listp);
                spin_unlock_irq(&phba->hbalock);
+               hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
                free_evt = 1;
                switch (evtp->evt) {
                case LPFC_EVT_ELS_RETRY:
                        ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
-                       lpfc_els_retry_delay_handler(ndlp);
-                       free_evt = 0; /* evt is part of ndlp */
+                       if (!hba_pci_err) {
+                               lpfc_els_retry_delay_handler(ndlp);
+                               free_evt = 0; /* evt is part of ndlp */
+                       }
                        /* decrement the node reference count held
                         * for this queued work
                         */
@@ -788,8 +817,10 @@ lpfc_work_list_done(struct lpfc_hba *phba)
                        break;
                case LPFC_EVT_RECOVER_PORT:
                        ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
-                       lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
-                       free_evt = 0;
+                       if (!hba_pci_err) {
+                               lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
+                               free_evt = 0;
+                       }
                        /* decrement the node reference count held for
                         * this queued work
                         */
@@ -859,14 +890,18 @@ lpfc_work_done(struct lpfc_hba *phba)
        struct lpfc_vport **vports;
        struct lpfc_vport *vport;
        int i;
+       bool hba_pci_err;
 
+       hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
        spin_lock_irq(&phba->hbalock);
        ha_copy = phba->work_ha;
        phba->work_ha = 0;
        spin_unlock_irq(&phba->hbalock);
+       if (hba_pci_err)
+               ha_copy = 0;
 
        /* First, try to post the next mailbox command to SLI4 device */
-       if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+       if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err)
                lpfc_sli4_post_async_mbox(phba);
 
        if (ha_copy & HA_ERATT) {
@@ -886,7 +921,7 @@ lpfc_work_done(struct lpfc_hba *phba)
                lpfc_handle_latt(phba);
 
        /* Handle VMID Events */
-       if (lpfc_is_vmid_enabled(phba)) {
+       if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) {
                if (phba->pport->work_port_events &
                    WORKER_CHECK_VMID_ISSUE_QFPA) {
                        lpfc_check_vmid_qfpa_issue(phba);
@@ -936,6 +971,8 @@ lpfc_work_done(struct lpfc_hba *phba)
                        work_port_events = vport->work_port_events;
                        vport->work_port_events &= ~work_port_events;
                        spin_unlock_irq(&vport->work_port_lock);
+                       if (hba_pci_err)
+                               continue;
                        if (work_port_events & WORKER_DISC_TMO)
                                lpfc_disc_timeout_handler(vport);
                        if (work_port_events & WORKER_ELS_TMO)
@@ -1173,12 +1210,14 @@ lpfc_linkdown(struct lpfc_hba *phba)
        struct lpfc_vport **vports;
        LPFC_MBOXQ_t          *mb;
        int i;
+       int offline;
 
        if (phba->link_state == LPFC_LINK_DOWN)
                return 0;
 
        /* Block all SCSI stack I/Os */
        lpfc_scsi_dev_block(phba);
+       offline = pci_channel_offline(phba->pcidev);
 
        phba->defer_flogi_acc_flag = false;
 
@@ -1219,7 +1258,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
        lpfc_destroy_vport_work_array(phba, vports);
 
        /* Clean up any SLI3 firmware default rpi's */
-       if (phba->sli_rev > LPFC_SLI_REV3)
+       if (phba->sli_rev > LPFC_SLI_REV3 || offline)
                goto skip_unreg_did;
 
        mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -4712,6 +4751,11 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        spin_lock_irqsave(&ndlp->lock, iflags);
        if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
                spin_unlock_irqrestore(&ndlp->lock, iflags);
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+                                "0999 %s Not regd: ndlp x%px rport x%px DID "
+                                "x%x FLG x%x XPT x%x\n",
+                                 __func__, ndlp, ndlp->rport, ndlp->nlp_DID,
+                                 ndlp->nlp_flag, ndlp->fc4_xpt_flags);
                return;
        }
 
@@ -4722,6 +4766,13 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
            ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
                vport->phba->nport_event_cnt++;
                lpfc_unregister_remote_port(ndlp);
+       } else if (!ndlp->rport) {
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+                                "1999 %s NDLP in devloss x%px DID x%x FLG x%x"
+                                " XPT x%x refcnt %d\n",
+                                __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
+                                ndlp->fc4_xpt_flags,
+                                kref_read(&ndlp->kref));
        }
 
        if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
@@ -5371,6 +5422,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                                ndlp->nlp_flag &= ~NLP_UNREG_INP;
                                mempool_free(mbox, phba->mbox_mem_pool);
                                acc_plogi = 1;
+                               lpfc_nlp_put(ndlp);
                        }
                } else {
                        lpfc_printf_vlog(vport, KERN_INFO,
@@ -6097,12 +6149,34 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
        }
 }
 
+/*
+ * lpfc_notify_xport_npr - notifies xport of node disappearance
+ * @vport: Pointer to Virtual Port object.
+ *
+ * Transitions all ndlps to NPR state.  When lpfc_nlp_set_state
+ * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered
+ * and transport notified that the node is gone.
+ * Return Code:
+ *     none
+ */
+static void
+lpfc_notify_xport_npr(struct lpfc_vport *vport)
+{
+       struct lpfc_nodelist *ndlp, *next_ndlp;
+
+       list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+                                nlp_listp) {
+               lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+       }
+}
 void
 lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
 {
        lpfc_els_flush_rscn(vport);
        lpfc_els_flush_cmd(vport);
        lpfc_disc_flush_list(vport);
+       if (pci_channel_offline(vport->phba->pcidev))
+               lpfc_notify_xport_npr(vport);
 }
 
 /*****************************************************************************/
index eed6464bd880669807eb339e25e71532c75010dc..461d333b1b3a828e2b03de2aebafe9cebe86f0e4 100644 (file)
@@ -95,6 +95,7 @@ static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
 static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
+static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);
 
 static struct scsi_transport_template *lpfc_transport_template = NULL;
 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1642,7 +1643,7 @@ lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
 {
        spin_lock_irq(&phba->hbalock);
        if (phba->link_state == LPFC_HBA_ERROR &&
-           phba->hba_flag & HBA_PCI_ERR) {
+               test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
                spin_unlock_irq(&phba->hbalock);
                return;
        }
@@ -1985,6 +1986,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
        if (pci_channel_offline(phba->pcidev)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "3166 pci channel is offline\n");
+               lpfc_sli_flush_io_rings(phba);
                return;
        }
 
@@ -2973,6 +2975,22 @@ lpfc_cleanup(struct lpfc_vport *vport)
                                        NLP_EVT_DEVICE_RM);
        }
 
+       /* This is a special case flush to return all
+        * IOs before entering this loop. There are
+        * two points in the code where a flush is
+        * avoided if the FC_UNLOADING flag is set.
+        * one is in the multipool destroy,
+        * (this prevents a crash) and the other is
+        * in the nvme abort handler, ( also prevents
+        * a crash). Both of these exceptions are
+        * cases where the slot is still accessible.
+        * The flush here is only when the pci slot
+        * is offline.
+        */
+       if (vport->load_flag & FC_UNLOADING &&
+           pci_channel_offline(phba->pcidev))
+               lpfc_sli_flush_io_rings(vport->phba);
+
        /* At this point, ALL ndlp's should be gone
         * because of the previous NLP_EVT_DEVICE_RM.
         * Lets wait for this to happen, if needed.
@@ -2985,7 +3003,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
                        list_for_each_entry_safe(ndlp, next_ndlp,
                                                &vport->fc_nodes, nlp_listp) {
                                lpfc_printf_vlog(ndlp->vport, KERN_ERR,
-                                                LOG_TRACE_EVENT,
+                                                LOG_DISCOVERY,
                                                 "0282 did:x%x ndlp:x%px "
                                                 "refcnt:%d xflags x%x nflag x%x\n",
                                                 ndlp->nlp_DID, (void *)ndlp,
@@ -3682,7 +3700,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
        struct lpfc_vport **vports;
        struct Scsi_Host *shost;
        int i;
-       int offline = 0;
+       int offline;
+       bool hba_pci_err;
 
        if (vport->fc_flag & FC_OFFLINE_MODE)
                return;
@@ -3692,6 +3711,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
        lpfc_linkdown(phba);
 
        offline =  pci_channel_offline(phba->pcidev);
+       hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
 
        /* Issue an unreg_login to all nodes on all vports */
        vports = lpfc_create_vport_work_array(phba);
@@ -3715,11 +3735,14 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
                                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
                                spin_unlock_irq(&ndlp->lock);
 
-                               if (offline) {
+                               if (offline || hba_pci_err) {
                                        spin_lock_irq(&ndlp->lock);
                                        ndlp->nlp_flag &= ~(NLP_UNREG_INP |
                                                            NLP_RPI_REGISTERED);
                                        spin_unlock_irq(&ndlp->lock);
+                                       if (phba->sli_rev == LPFC_SLI_REV4)
+                                               lpfc_sli_rpi_release(vports[i],
+                                                                    ndlp);
                                } else {
                                        lpfc_unreg_rpi(vports[i], ndlp);
                                }
@@ -13354,8 +13377,9 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
        /* Abort all iocbs associated with the hba */
        lpfc_sli_hba_iocb_abort(phba);
 
-       /* Wait for completion of device XRI exchange busy */
-       lpfc_sli4_xri_exchange_busy_wait(phba);
+       if (!pci_channel_offline(phba->pcidev))
+               /* Wait for completion of device XRI exchange busy */
+               lpfc_sli4_xri_exchange_busy_wait(phba);
 
        /* per-phba callback de-registration for hotplug event */
        if (phba->pport)
@@ -13374,15 +13398,12 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
        /* Disable FW logging to host memory */
        lpfc_ras_stop_fwlog(phba);
 
-       /* Unset the queues shared with the hardware then release all
-        * allocated resources.
-        */
-       lpfc_sli4_queue_unset(phba);
-       lpfc_sli4_queue_destroy(phba);
-
        /* Reset SLI4 HBA FCoE function */
        lpfc_pci_function_reset(phba);
 
+       /* release all queue allocated resources. */
+       lpfc_sli4_queue_destroy(phba);
+
        /* Free RAS DMA memory */
        if (phba->ras_fwlog.ras_enabled)
                lpfc_sli4_ras_dma_free(phba);
@@ -14262,6 +14283,7 @@ lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
                        "2711 PCI channel permanent disable for failure\n");
        /* Block all SCSI devices' I/Os on the host */
        lpfc_scsi_dev_block(phba);
+       lpfc_sli4_prep_dev_for_reset(phba);
 
        /* stop all timers */
        lpfc_stop_hba_timers(phba);
@@ -15057,24 +15079,28 @@ lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
 static void
 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
 {
-       lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
-                       "2826 PCI channel disable preparing for reset\n");
+       int offline =  pci_channel_offline(phba->pcidev);
+
+       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "2826 PCI channel disable preparing for reset offline"
+                       " %d\n", offline);
 
        /* Block any management I/Os to the device */
        lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
 
-       /* Block all SCSI devices' I/Os on the host */
-       lpfc_scsi_dev_block(phba);
 
+       /* HBA_PCI_ERR was set in io_error_detect */
+       lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
        /* Flush all driver's outstanding I/Os as we are to reset */
        lpfc_sli_flush_io_rings(phba);
+       lpfc_offline(phba);
 
        /* stop all timers */
        lpfc_stop_hba_timers(phba);
 
+       lpfc_sli4_queue_destroy(phba);
        /* Disable interrupt and pci device */
        lpfc_sli4_disable_intr(phba);
-       lpfc_sli4_queue_destroy(phba);
        pci_disable_device(phba->pcidev);
 }
 
@@ -15123,6 +15149,7 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+       bool hba_pci_err;
 
        switch (state) {
        case pci_channel_io_normal:
@@ -15130,17 +15157,24 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
                lpfc_sli4_prep_dev_for_recover(phba);
                return PCI_ERS_RESULT_CAN_RECOVER;
        case pci_channel_io_frozen:
-               phba->hba_flag |= HBA_PCI_ERR;
+               hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
                /* Fatal error, prepare for slot reset */
-               lpfc_sli4_prep_dev_for_reset(phba);
+               if (!hba_pci_err)
+                       lpfc_sli4_prep_dev_for_reset(phba);
+               else
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "2832  Already handling PCI error "
+                                       "state: x%x\n", state);
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
-               phba->hba_flag |= HBA_PCI_ERR;
+               set_bit(HBA_PCI_ERR, &phba->bit_flags);
                /* Permanent failure, prepare for device down */
                lpfc_sli4_prep_dev_for_perm_failure(phba);
                return PCI_ERS_RESULT_DISCONNECT;
        default:
-               phba->hba_flag |= HBA_PCI_ERR;
+               hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
+               if (!hba_pci_err)
+                       lpfc_sli4_prep_dev_for_reset(phba);
                /* Unknown state, prepare and request slot reset */
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "2825 Unknown PCI error state: x%x\n", state);
@@ -15174,17 +15208,21 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        struct lpfc_sli *psli = &phba->sli;
        uint32_t intr_mode;
+       bool hba_pci_err;
 
        dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
        if (pci_enable_device_mem(pdev)) {
                printk(KERN_ERR "lpfc: Cannot re-enable "
-                       "PCI device after reset.\n");
+                      "PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
 
        pci_restore_state(pdev);
 
-       phba->hba_flag &= ~HBA_PCI_ERR;
+       hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
+       if (!hba_pci_err)
+               dev_info(&pdev->dev,
+                        "hba_pci_err was not set, recovering slot reset.\n");
        /*
         * As the new kernel behavior of pci_restore_state() API call clears
         * device saved_state flag, need to save the restored state again.
@@ -15198,6 +15236,8 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
        spin_unlock_irq(&phba->hbalock);
 
+       /* Init cpu_map array */
+       lpfc_cpu_map_array_init(phba);
        /* Configure and enable interrupt */
        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
        if (intr_mode == LPFC_INTR_ERROR) {
@@ -15239,8 +15279,6 @@ lpfc_io_resume_s4(struct pci_dev *pdev)
         */
        if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
                /* Perform device reset */
-               lpfc_offline_prep(phba, LPFC_MBX_WAIT);
-               lpfc_offline(phba);
                lpfc_sli_brdrestart(phba);
                /* Bring the device back online */
                lpfc_online(phba);
index 1213a299f9aae96efd3404cd0c4554b3dd236d6f..8d26f207ebd22724838e9c90aed8a55b0e5d04ce 100644 (file)
@@ -93,6 +93,11 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
 
        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        vport = lport->vport;
+
+       if (!vport || vport->load_flag & FC_UNLOADING ||
+           vport->phba->hba_flag & HBA_IOQ_FLUSH)
+               return -ENODEV;
+
        qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
        if (qhandle == NULL)
                return -ENOMEM;
@@ -267,7 +272,8 @@ lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
                return -EINVAL;
 
        remoteport = lpfc_rport->remoteport;
-       if (!vport->localport)
+       if (!vport->localport ||
+           vport->phba->hba_flag & HBA_IOQ_FLUSH)
                return -EINVAL;
 
        lport = vport->localport->private;
@@ -559,6 +565,8 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                                 ndlp->nlp_DID, ntype, nstate);
                return -ENODEV;
        }
+       if (vport->phba->hba_flag & HBA_IOQ_FLUSH)
+               return -ENODEV;
 
        if (!vport->phba->sli4_hba.nvmels_wq)
                return -ENOMEM;
@@ -662,7 +670,8 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
                return -EINVAL;
 
        vport = lport->vport;
-       if (vport->load_flag & FC_UNLOADING)
+       if (vport->load_flag & FC_UNLOADING ||
+           vport->phba->hba_flag & HBA_IOQ_FLUSH)
                return -ENODEV;
 
        atomic_inc(&lport->fc4NvmeLsRequests);
@@ -1516,7 +1525,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 
        phba = vport->phba;
 
-       if (unlikely(vport->load_flag & FC_UNLOADING)) {
+       if ((unlikely(vport->load_flag & FC_UNLOADING)) ||
+           phba->hba_flag & HBA_IOQ_FLUSH) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
                                 "6124 Fail IO, Driver unload\n");
                atomic_inc(&lport->xmt_fcp_err);
@@ -2169,8 +2179,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
                        abts_nvme = 0;
                        for (i = 0; i < phba->cfg_hdw_queue; i++) {
                                qp = &phba->sli4_hba.hdwq[i];
-                               if (!vport || !vport->localport ||
-                                   !qp || !qp->io_wq)
+                               if (!vport->localport || !qp || !qp->io_wq)
                                        return;
 
                                pring = qp->io_wq->pring;
@@ -2180,8 +2189,9 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
                                abts_scsi += qp->abts_scsi_io_bufs;
                                abts_nvme += qp->abts_nvme_io_bufs;
                        }
-                       if (!vport || !vport->localport ||
-                           vport->phba->hba_flag & HBA_PCI_ERR)
+                       if (!vport->localport ||
+                           test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
+                           vport->load_flag & FC_UNLOADING)
                                return;
 
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -2541,8 +2551,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                 * return values is ignored.  The upcall is a courtesy to the
                 * transport.
                 */
-               if (vport->load_flag & FC_UNLOADING ||
-                   unlikely(vport->phba->hba_flag & HBA_PCI_ERR))
+               if (vport->load_flag & FC_UNLOADING)
                        (void)nvme_fc_set_remoteport_devloss(remoteport, 0);
 
                ret = nvme_fc_unregister_remoteport(remoteport);
index 3c132604fd9136e3269717e4c43a572fd873b9df..ba9dbb51b75f06b9536695a0ec943695844703f5 100644 (file)
@@ -5929,13 +5929,15 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
        }
 
        lpfc_cmd->waitq = &waitq;
-       if (phba->sli_rev == LPFC_SLI_REV4)
+       if (phba->sli_rev == LPFC_SLI_REV4) {
                spin_unlock(&pring_s4->ring_lock);
-       else
+               ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
+                                                     lpfc_sli_abort_fcp_cmpl);
+       } else {
                pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
-
-       ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
-                                            lpfc_sli_abort_fcp_cmpl);
+               ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
+                                                    lpfc_sli_abort_fcp_cmpl);
+       }
 
        /* Make sure HBA is alive */
        lpfc_issue_hb_tmo(phba);
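
In lpfc_abort_handler() the single shared call to lpfc_sli_issue_abort_iotag() is split per SLI revision: the SLI4 branch drops its ring lock and then uses the SLI4-specific lpfc_sli4_issue_abort_iotag(), which needs no ring pointer, while SLI3 keeps the legacy call against the FCP ring. A toy dispatch in the same shape; both helpers below are stand-ins:

    #include <stdio.h>

    enum sli_rev { LPFC_SLI_REV3 = 3, LPFC_SLI_REV4 = 4 };

    /* Simplified stand-ins for the two abort entry points. */
    static int issue_abort_sli4(void) { puts("SLI4 abort, no ring argument"); return 0; }
    static int issue_abort_sli3(void) { puts("SLI3 abort via the FCP ring"); return 0; }

    static int abort_cmd(enum sli_rev rev)
    {
        /* Each revision now calls its own helper instead of sharing one. */
        return rev == LPFC_SLI_REV4 ? issue_abort_sli4() : issue_abort_sli3();
    }

    int main(void) { return abort_cmd(LPFC_SLI_REV4); }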
index 20d40957a3853d9868e1c25038bcea4997b9ffc5..bda2a7ba4e77fe5f19c7c60521ac6a9ea5d7424f 100644
@@ -2828,6 +2828,12 @@ __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        ndlp->nlp_flag &= ~NLP_UNREG_INP;
 }
 
+void
+lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+       __lpfc_sli_rpi_release(vport, ndlp);
+}
+
 /**
  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
  * @phba: Pointer to HBA context object.
@@ -3715,7 +3721,15 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        unsigned long iflag;
        u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
 
+       if (phba->sli_rev == LPFC_SLI_REV4)
+               spin_lock_irqsave(&pring->ring_lock, iflag);
+       else
+               spin_lock_irqsave(&phba->hbalock, iflag);
        cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
+       if (phba->sli_rev == LPFC_SLI_REV4)
+               spin_unlock_irqrestore(&pring->ring_lock, iflag);
+       else
+               spin_unlock_irqrestore(&phba->hbalock, iflag);
 
        ulp_command = get_job_cmnd(phba, saveq);
        ulp_status = get_job_ulpstatus(phba, saveq);
@@ -4052,10 +4066,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
                                break;
                        }
 
-                       spin_unlock_irqrestore(&phba->hbalock, iflag);
                        cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
                                                         &rspiocbq);
-                       spin_lock_irqsave(&phba->hbalock, iflag);
                        if (unlikely(!cmdiocbq))
                                break;
                        if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
@@ -4536,42 +4548,62 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
 void
 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 {
-       LIST_HEAD(completions);
+       LIST_HEAD(tx_completions);
+       LIST_HEAD(txcmplq_completions);
        struct lpfc_iocbq *iocb, *next_iocb;
+       int offline;
 
        if (pring->ringno == LPFC_ELS_RING) {
                lpfc_fabric_abort_hba(phba);
        }
+       offline = pci_channel_offline(phba->pcidev);
 
        /* Error everything on txq and txcmplq
         * First do the txq.
         */
        if (phba->sli_rev >= LPFC_SLI_REV4) {
                spin_lock_irq(&pring->ring_lock);
-               list_splice_init(&pring->txq, &completions);
+               list_splice_init(&pring->txq, &tx_completions);
                pring->txq_cnt = 0;
-               spin_unlock_irq(&pring->ring_lock);
 
-               spin_lock_irq(&phba->hbalock);
-               /* Next issue ABTS for everything on the txcmplq */
-               list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
-                       lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
-               spin_unlock_irq(&phba->hbalock);
+               if (offline) {
+                       list_splice_init(&pring->txcmplq,
+                                        &txcmplq_completions);
+               } else {
+                       /* Next issue ABTS for everything on the txcmplq */
+                       list_for_each_entry_safe(iocb, next_iocb,
+                                                &pring->txcmplq, list)
+                               lpfc_sli_issue_abort_iotag(phba, pring,
+                                                          iocb, NULL);
+               }
+               spin_unlock_irq(&pring->ring_lock);
        } else {
                spin_lock_irq(&phba->hbalock);
-               list_splice_init(&pring->txq, &completions);
+               list_splice_init(&pring->txq, &tx_completions);
                pring->txq_cnt = 0;
 
-               /* Next issue ABTS for everything on the txcmplq */
-               list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
-                       lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
+               if (offline) {
+                       list_splice_init(&pring->txcmplq, &txcmplq_completions);
+               } else {
+                       /* Next issue ABTS for everything on the txcmplq */
+                       list_for_each_entry_safe(iocb, next_iocb,
+                                                &pring->txcmplq, list)
+                               lpfc_sli_issue_abort_iotag(phba, pring,
+                                                          iocb, NULL);
+               }
                spin_unlock_irq(&phba->hbalock);
        }
-       /* Make sure HBA is alive */
-       lpfc_issue_hb_tmo(phba);
 
+       if (offline) {
+               /* Cancel all the IOCBs from the completions list */
+               lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
+                                     IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
+       } else {
+               /* Make sure HBA is alive */
+               lpfc_issue_hb_tmo(phba);
+       }
        /* Cancel all the IOCBs from the completions list */
-       lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+       lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
                              IOERR_SLI_ABORTED);
 }
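
The reworked lpfc_sli_abort_iocb_ring() above branches on pci_channel_offline(): with the PCI channel dead there is no point sending ABTS requests or heartbeat mailbox commands to the adapter, so the txcmplq is spliced onto a local list and every entry is completed with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED instead. A runnable miniature of that splice-and-cancel decision, with a toy singly-linked list in place of the kernel's list_head:

    #include <stdio.h>
    #include <stdbool.h>

    struct iocb { int tag; struct iocb *next; };

    /* Move every entry from *src onto *dst, emptying *src (list_splice_init). */
    static void splice_init(struct iocb **dst, struct iocb **src)
    {
        *dst = *src;
        *src = NULL;
    }

    static void flush_ring(struct iocb **txcmplq, bool offline)
    {
        struct iocb *done = NULL;

        if (offline) {
            /* Hardware is gone: don't send ABTS, just fail everything. */
            splice_init(&done, txcmplq);
            for (struct iocb *i = done; i; i = i->next)
                printf("cancel tag %d (LOCAL_REJECT/SLI_ABORTED)\n", i->tag);
        } else {
            for (struct iocb *i = *txcmplq; i; i = i->next)
                printf("issue ABTS for tag %d\n", i->tag);
        }
    }

    int main(void)
    {
        struct iocb b = { 2, NULL }, a = { 1, &b }, *q = &a;
        flush_ring(&q, true);
        return 0;
    }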
 
@@ -4624,11 +4656,6 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
        struct lpfc_iocbq *piocb, *next_iocb;
 
        spin_lock_irq(&phba->hbalock);
-       if (phba->hba_flag & HBA_IOQ_FLUSH ||
-           !phba->sli4_hba.hdwq) {
-               spin_unlock_irq(&phba->hbalock);
-               return;
-       }
        /* Indicate the I/O queues are flushed */
        phba->hba_flag |= HBA_IOQ_FLUSH;
        spin_unlock_irq(&phba->hbalock);
@@ -10997,6 +11024,10 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
        unsigned long iflags;
        int rc;
 
+       /* If the PCI channel is in offline state, do not post iocbs. */
+       if (unlikely(pci_channel_offline(phba->pcidev)))
+               return IOCB_ERROR;
+
        if (phba->sli_rev == LPFC_SLI_REV4) {
                lpfc_sli_prep_wqe(phba, piocb);
 
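Two related lpfc_sli.c changes close the loop: lpfc_sli_flush_io_rings() no longer returns early when HBA_IOQ_FLUSH is already set, so a second caller still sweeps the rings, and lpfc_sli_issue_iocb() refuses to post anything once the PCI channel is offline. Sketched together, with globals standing in for the HBA state:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int hba_flag;
    #define HBA_IOQ_FLUSH 0x1u

    /* Old code returned early if the flag was already set; the patch always
     * re-runs the flush so a second caller still sweeps the rings. */
    static void flush_io_rings(void)
    {
        hba_flag |= HBA_IOQ_FLUSH;
        puts("flushing all hardware queues");
    }

    static int issue_iocb(bool pci_offline)
    {
        if (pci_offline)        /* new fast-fail: don't post to a dead slot */
            return -1;          /* IOCB_ERROR in the driver */
        return 0;
    }

    int main(void)
    {
        flush_io_rings();
        flush_io_rings();       /* now flushes again instead of bailing */
        return issue_iocb(true) ? 0 : 1;
    }
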
index e52f37e5d8965ef19abfa3a15d0dc57c02e8c40e..a4d3259b8c52ad55de71ff3b1e93932a573c6820 100644
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "14.2.0.0"
+#define LPFC_DRIVER_VERSION "14.2.0.1"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
index 611871ef15b5d0bcc0a7c010d1f299a9c739b0fe..4919ea54b8277b9c847d40107b501a759fa02037 100644
@@ -2560,6 +2560,9 @@ struct megasas_instance_template {
 #define MEGASAS_IS_LOGICAL(sdev)                                       \
        ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
 
+#define MEGASAS_IS_LUN_VALID(sdev)                                     \
+       (((sdev)->lun == 0) ? 1 : 0)
+
 #define MEGASAS_DEV_INDEX(scp)                                         \
        (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +   \
        scp->device->id)
index 8bf72dbc33b73ce651ec092b23e59308ce55e233..db6793608447a77ffd878b26a75934aac6ca8469 100644
@@ -2126,6 +2126,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
                        goto scan_target;
                }
                return -ENXIO;
+       } else if (!MEGASAS_IS_LUN_VALID(sdev)) {
+               sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
+               return -ENXIO;
        }
 
 scan_target:
@@ -2156,6 +2159,10 @@ static void megasas_slave_destroy(struct scsi_device *sdev)
        instance = megasas_lookup_instance(sdev->host->host_no);
 
        if (MEGASAS_IS_LOGICAL(sdev)) {
+               if (!MEGASAS_IS_LUN_VALID(sdev)) {
+                       sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
+                       return;
+               }
                ld_tgt_id = MEGASAS_TARGET_ID(sdev);
                instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
                if (megasas_dbg_lvl & LD_PD_DEBUG)
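
The megaraid_sas hunks introduce MEGASAS_IS_LUN_VALID() and apply it in slave_alloc/slave_destroy: logical drives only exist at LUN 0, so any other LUN is rejected with -ENXIO before per-target bookkeeping such as ld_tgtid_status is touched. The macro's behavior in isolation; the sdev struct below is a stand-in for scsi_device:

    #include <stdio.h>

    struct sdev_sim { unsigned int channel, id, lun; };

    /* Mirrors MEGASAS_IS_LUN_VALID: logical drives only exist at LUN 0. */
    #define IS_LUN_VALID(sdev) (((sdev)->lun == 0) ? 1 : 0)

    int main(void)
    {
        struct sdev_sim good = { 1, 0, 0 }, bad = { 1, 0, 3 };

        printf("lun 0 valid: %d, lun 3 valid: %d\n",
               IS_LUN_VALID(&good), IS_LUN_VALID(&bad));
        return 0;
    }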
index b57f1803371eafd451bd7cdd597dee8f6158bb44..538d2c0cd971303865eeace130c3eb5d95af4168 100644
@@ -5716,13 +5716,12 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 /**
  * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set are
  *     having same upper 32bits in their base memory address.
- * @reply_pool_start_address: Base address of a reply queue set
+ * @start_address: Base address of a reply queue set
  * @pool_sz: Size of single Reply Descriptor Post Queues pool size
  *
  * Return: 1 if reply queues in a set have a same upper 32bits in their base
  * memory address, else 0.
  */
-
 static int
 mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz)
 {
index 0563078227de6c1265c72289e45b84d99424025c..a8dd14c91efdb2d55be89a647f553589fd84fcc4 100644
@@ -394,10 +394,13 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
                retry_count++;
                if (ioc->config_cmds.smid == smid)
                        mpt3sas_base_free_smid(ioc, smid);
-               if ((ioc->shost_recovery) || (ioc->config_cmds.status &
-                   MPT3_CMD_RESET) || ioc->pci_error_recovery)
+               if (ioc->config_cmds.status & MPT3_CMD_RESET)
                        goto retry_config;
-               issue_host_reset = 1;
+               if (ioc->shost_recovery || ioc->pci_error_recovery) {
+                       issue_host_reset = 0;
+                       r = -EFAULT;
+               } else
+                       issue_host_reset = 1;
                goto free_mem;
        }
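
_config_request()'s error path is untangled above: a reset of the config command still retries, but if host or PCI error recovery is already in flight the request now fails with -EFAULT instead of escalating to another host reset on top of the one in progress. The resulting three-way policy, as a tiny decision function:

    #include <stdbool.h>
    #include <stdio.h>

    /* Returns 1 to retry, 0 to fail (-EFAULT), -1 to escalate to host reset. */
    static int config_error_policy(bool cmd_reset, bool shost_recovery,
                                   bool pci_error_recovery)
    {
        if (cmd_reset)
            return 1;                  /* retry the request */
        if (shost_recovery || pci_error_recovery)
            return 0;                  /* fail; don't reset again */
        return -1;                     /* genuine failure: host reset */
    }

    int main(void)
    {
        printf("during recovery: %d\n", config_error_policy(false, true, false));
        return 0;
    }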
 
index 00792767c620d707be9f8a64329b32877327778f..7e476f50935b8bd86a3fdecd167aae099239dd27 100644
@@ -11035,6 +11035,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
 {
        struct _sas_port *mpt3sas_port, *next;
        unsigned long flags;
+       int port_id;
 
        /* remove sibling ports attached to this expander */
        list_for_each_entry_safe(mpt3sas_port, next,
@@ -11055,6 +11056,8 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
                            mpt3sas_port->hba_port);
        }
 
+       port_id = sas_expander->port->port_id;
+
        mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
            sas_expander->sas_address_parent, sas_expander->port);
 
@@ -11062,7 +11065,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
            "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
            sas_expander->handle, (unsigned long long)
            sas_expander->sas_address,
-           sas_expander->port->port_id);
+           port_id);
 
        spin_lock_irqsave(&ioc->sas_node_lock, flags);
        list_del(&sas_expander->list);
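
This is a use-after-free fix: mpt3sas_transport_port_remove() can free sas_expander->port, yet the old code read port->port_id afterwards for the log message. The patch caches port_id first and prints the copy. The pattern in miniature:

    #include <stdio.h>
    #include <stdlib.h>

    struct hba_port { int port_id; };
    struct expander { struct hba_port *port; };

    static void port_remove(struct expander *e)
    {
        free(e->port);          /* after this, e->port must not be read */
        e->port = NULL;
    }

    int main(void)
    {
        struct expander ex = { .port = malloc(sizeof(struct hba_port)) };

        if (!ex.port)
            return 1;
        ex.port->port_id = 7;

        int port_id = ex.port->port_id; /* cache it before the free */
        port_remove(&ex);
        printf("expander removed, port:%d\n", port_id);
        return 0;
    }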
index 7ac63eb5ccd385292bbda62f7f6ade9edaa44597..2fde496fff5f7409c9fbab5e0cad45a8f1af9903 100644
@@ -647,6 +647,7 @@ static struct pci_device_id mvs_pci_table[] = {
        { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
        { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
        { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
+       { PCI_VDEVICE(TTI, 0x2640), chip_6440 },
        { PCI_VDEVICE(TTI, 0x2710), chip_9480 },
        { PCI_VDEVICE(TTI, 0x2720), chip_9480 },
        { PCI_VDEVICE(TTI, 0x2721), chip_9480 },
index c4a838635893a6413b944a5423ba900742e8b56f..5d7dfefd6f6c9ee86613bec6b49b00f5d8962603 100644
@@ -192,10 +192,11 @@ struct sym53c500_data {
        int fast_pio;
 };
 
-static struct scsi_pointer *sym53c500_scsi_pointer(struct scsi_cmnd *cmd)
-{
-       return scsi_cmd_priv(cmd);
-}
+struct sym53c500_cmd_priv {
+       int status;
+       int message;
+       int phase;
+};
 
 enum Phase {
     idle,
@@ -356,7 +357,7 @@ SYM53C500_intr(int irq, void *dev_id)
        struct sym53c500_data *data =
            (struct sym53c500_data *)dev->hostdata;
        struct scsi_cmnd *curSC = data->current_SC;
-       struct scsi_pointer *scsi_pointer = sym53c500_scsi_pointer(curSC);
+       struct sym53c500_cmd_priv *scp = scsi_cmd_priv(curSC);
        int fast_pio = data->fast_pio;
 
        spin_lock_irqsave(dev->host_lock, flags);
@@ -403,12 +404,11 @@ SYM53C500_intr(int irq, void *dev_id)
 
        if (int_reg & 0x20) {           /* Disconnect */
                DEB(printk("SYM53C500: disconnect intr received\n"));
-               if (scsi_pointer->phase != message_in) {        /* Unexpected disconnect */
+               if (scp->phase != message_in) { /* Unexpected disconnect */
                        curSC->result = DID_NO_CONNECT << 16;
                } else {        /* Command complete, return status and message */
-                       curSC->result = (scsi_pointer->Status & 0xff) |
-                               ((scsi_pointer->Message & 0xff) << 8) |
-                               (DID_OK << 16);
+                       curSC->result = (scp->status & 0xff) |
+                               ((scp->message & 0xff) << 8) | (DID_OK << 16);
                }
                goto idle_out;
        }
@@ -419,7 +419,7 @@ SYM53C500_intr(int irq, void *dev_id)
                        struct scatterlist *sg;
                        int i;
 
-                       scsi_pointer->phase = data_out;
+                       scp->phase = data_out;
                        VDEB(printk("SYM53C500: Data-Out phase\n"));
                        outb(FLUSH_FIFO, port_base + CMD_REG);
                        LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
@@ -438,7 +438,7 @@ SYM53C500_intr(int irq, void *dev_id)
                        struct scatterlist *sg;
                        int i;
 
-                       scsi_pointer->phase = data_in;
+                       scp->phase = data_in;
                        VDEB(printk("SYM53C500: Data-In phase\n"));
                        outb(FLUSH_FIFO, port_base + CMD_REG);
                        LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
@@ -453,12 +453,12 @@ SYM53C500_intr(int irq, void *dev_id)
                break;
 
        case 0x02:              /* COMMAND */
-               scsi_pointer->phase = command_ph;
+               scp->phase = command_ph;
                printk("SYM53C500: Warning: Unknown interrupt occurred in command phase!\n");
                break;
 
        case 0x03:              /* STATUS */
-               scsi_pointer->phase = status_ph;
+               scp->phase = status_ph;
                VDEB(printk("SYM53C500: Status phase\n"));
                outb(FLUSH_FIFO, port_base + CMD_REG);
                outb(INIT_CMD_COMPLETE, port_base + CMD_REG);
@@ -471,24 +471,22 @@ SYM53C500_intr(int irq, void *dev_id)
 
        case 0x06:              /* MESSAGE-OUT */
                DEB(printk("SYM53C500: Message-Out phase\n"));
-               scsi_pointer->phase = message_out;
+               scp->phase = message_out;
                outb(SET_ATN, port_base + CMD_REG);     /* Reject the message */
                outb(MSG_ACCEPT, port_base + CMD_REG);
                break;
 
        case 0x07:              /* MESSAGE-IN */
                VDEB(printk("SYM53C500: Message-In phase\n"));
-               scsi_pointer->phase = message_in;
+               scp->phase = message_in;
 
-               scsi_pointer->Status = inb(port_base + SCSI_FIFO);
-               scsi_pointer->Message = inb(port_base + SCSI_FIFO);
+               scp->status = inb(port_base + SCSI_FIFO);
+               scp->message = inb(port_base + SCSI_FIFO);
 
                VDEB(printk("SCSI FIFO size=%d\n", inb(port_base + FIFO_FLAGS) & 0x1f));
-               DEB(printk("Status = %02x  Message = %02x\n",
-                          scsi_pointer->Status, scsi_pointer->Message));
+               DEB(printk("Status = %02x  Message = %02x\n", scp->status, scp->message));
 
-               if (scsi_pointer->Message == SAVE_POINTERS ||
-                   scsi_pointer->Message == DISCONNECT) {
+               if (scp->message == SAVE_POINTERS || scp->message == DISCONNECT) {
                        outb(SET_ATN, port_base + CMD_REG);     /* Reject message */
                        DEB(printk("Discarding SAVE_POINTERS message\n"));
                }
@@ -500,7 +498,7 @@ out:
        return IRQ_HANDLED;
 
 idle_out:
-       scsi_pointer->phase = idle;
+       scp->phase = idle;
        scsi_done(curSC);
        goto out;
 }
@@ -548,7 +546,7 @@ SYM53C500_info(struct Scsi_Host *SChost)
 
 static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt)
 {
-       struct scsi_pointer *scsi_pointer = sym53c500_scsi_pointer(SCpnt);
+       struct sym53c500_cmd_priv *scp = scsi_cmd_priv(SCpnt);
        int i;
        int port_base = SCpnt->device->host->io_port;
        struct sym53c500_data *data =
@@ -565,9 +563,9 @@ static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt)
        VDEB(printk("\n"));
 
        data->current_SC = SCpnt;
-       scsi_pointer->phase = command_ph;
-       scsi_pointer->Status = 0;
-       scsi_pointer->Message = 0;
+       scp->phase = command_ph;
+       scp->status = 0;
+       scp->message = 0;
 
        /* We are locked here already by the mid layer */
        REG0(port_base);
@@ -682,7 +680,7 @@ static struct scsi_host_template sym53c500_driver_template = {
      .this_id                  = 7,
      .sg_tablesize             = 32,
      .shost_groups             = SYM53C500_shost_groups,
-     .cmd_size                 = sizeof(struct scsi_pointer),
+     .cmd_size                 = sizeof(struct sym53c500_cmd_priv),
 };
 
 static int SYM53C500_config_check(struct pcmcia_device *p_dev, void *priv_data)
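
The sym53c500_cs conversion moves per-command state (status, message, phase) out of the mid-layer's struct scsi_pointer, whose fields are being retired, into a driver-private struct sized through the host template's cmd_size and reached via scsi_cmd_priv(). A stand-alone model of that private-area pattern; the mid-layer allocation is simulated here:

    #include <stdio.h>
    #include <stdlib.h>

    /* Driver-private per-command state, replacing scsi_pointer fields. */
    struct cmd_priv { int status, message, phase; };

    /* The real mid-layer appends cmd_size bytes to each command for us;
     * this toy struct just embeds them directly. */
    struct scsi_cmnd_sim { struct cmd_priv priv; };

    static struct cmd_priv *cmd_priv_of(struct scsi_cmnd_sim *c)
    {
        return &c->priv;        /* stands in for scsi_cmd_priv(cmd) */
    }

    int main(void)
    {
        struct scsi_cmnd_sim *cmd = calloc(1, sizeof(*cmd));
        if (!cmd)
            return 1;

        struct cmd_priv *scp = cmd_priv_of(cmd);
        scp->phase = 1;         /* e.g. command_ph */
        printf("phase=%d status=%d\n", scp->phase, scp->status);
        free(cmd);
        return 0;
    }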
index f90b707c190bfdd49000ef392e92b0cff49dbd30..01c5e8ff4cc5fbe41525fe64bcafa47a060e94ed 100644
@@ -766,6 +766,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
        pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity       = 0x01;
        pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt          = 0x01;
 
+       /* Enable higher IQs and OQs, 32 to 63, bit 16 */
+       if (pm8001_ha->max_q_num > 32)
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
+                                                       1 << 16;
        /* Disable end to end CRC checking */
        pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
 
@@ -1027,6 +1031,13 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
        if (0x0000 != gst_len_mpistate)
                return -EBUSY;
 
+       /*
+        *  As per controller datasheet, after successful MPI
+        *  initialization minimum 500ms delay is required before
+        *  issuing commands.
+        */
+       msleep(500);
+
        return 0;
 }
 
@@ -1727,10 +1738,11 @@ static void
 pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 #ifdef PM8001_USE_MSIX
-       u32 mask;
-       mask = (u32)(1 << vec);
-
-       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
+       if (vec < 32)
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec);
+       else
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U,
+                           1U << (vec - 32));
        return;
 #endif
        pm80xx_chip_intx_interrupt_enable(pm8001_ha);
@@ -1746,12 +1758,15 @@ static void
 pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 #ifdef PM8001_USE_MSIX
-       u32 mask;
-       if (vec == 0xFF)
-               mask = 0xFFFFFFFF;
+       if (vec == 0xFF) {
+               /* disable all vectors 0-31, 32-63 */
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF);
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF);
+       } else if (vec < 32)
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec);
        else
-               mask = (u32)(1 << vec);
-       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U,
+                           1U << (vec - 32));
        return;
 #endif
        pm80xx_chip_intx_interrupt_disable(pm8001_ha);
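
The pm80xx hunks add support for outbound queues 32-63: fatal_err_interrupt bit 16 advertises the higher queues, a 500 ms post-MPI-init delay is inserted per the controller datasheet, and interrupt masking now picks between the low and high ODMR banks by vector number, rebasing the shift for the upper bank. The bank-selection arithmetic, runnable on its own; the two globals stand in for the MSGU_ODMR/MSGU_ODMR_U registers, modeled (as an assumption of this sketch) as write-1-to-mask:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t odmr, odmr_u;   /* stand-ins for MSGU_ODMR / MSGU_ODMR_U */

    static void interrupt_disable(unsigned int vec)
    {
        if (vec == 0xFF) {                  /* mask everything, both banks */
            odmr = odmr_u = 0xFFFFFFFFu;
        } else if (vec < 32) {
            odmr |= 1u << vec;
        } else {
            odmr_u |= 1u << (vec - 32);     /* upper bank, rebased shift */
        }
    }

    int main(void)
    {
        interrupt_disable(33);
        printf("ODMR=%08x ODMR_U=%08x\n", odmr, odmr_u);
        return 0;
    }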
index 928532180d323a81643948c4d95a9e1f2eda02e0..fd674ed1febed1c549859301545da4be19c91717 100644
@@ -3181,124 +3181,6 @@ static int pmcraid_build_ioadl(
        return 0;
 }
 
-/**
- * pmcraid_free_sglist - Frees an allocated SG buffer list
- * @sglist: scatter/gather list pointer
- *
- * Free a DMA'able memory previously allocated with pmcraid_alloc_sglist
- *
- * Return value:
- *     none
- */
-static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
-{
-       sgl_free_order(sglist->scatterlist, sglist->order);
-       kfree(sglist);
-}
-
-/**
- * pmcraid_alloc_sglist - Allocates memory for a SG list
- * @buflen: buffer length
- *
- * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
- * list.
- *
- * Return value
- *     pointer to sglist / NULL on failure
- */
-static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
-{
-       struct pmcraid_sglist *sglist;
-       int sg_size;
-       int order;
-
-       sg_size = buflen / (PMCRAID_MAX_IOADLS - 1);
-       order = (sg_size > 0) ? get_order(sg_size) : 0;
-
-       /* Allocate a scatter/gather list for the DMA */
-       sglist = kzalloc(sizeof(struct pmcraid_sglist), GFP_KERNEL);
-       if (sglist == NULL)
-               return NULL;
-
-       sglist->order = order;
-       sgl_alloc_order(buflen, order, false, GFP_KERNEL | __GFP_ZERO,
-                       &sglist->num_sg);
-
-       return sglist;
-}
-
-/**
- * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list
- * @sglist: scatter/gather list pointer
- * @buffer: buffer pointer
- * @len: buffer length
- * @direction: data transfer direction
- *
- * Copy a user buffer into a buffer allocated by pmcraid_alloc_sglist
- *
- * Return value:
- * 0 on success / other on failure
- */
-static int pmcraid_copy_sglist(
-       struct pmcraid_sglist *sglist,
-       void __user *buffer,
-       u32 len,
-       int direction
-)
-{
-       struct scatterlist *sg;
-       void *kaddr;
-       int bsize_elem;
-       int i;
-       int rc = 0;
-
-       /* Determine the actual number of bytes per element */
-       bsize_elem = PAGE_SIZE * (1 << sglist->order);
-
-       sg = sglist->scatterlist;
-
-       for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), buffer += bsize_elem) {
-               struct page *page = sg_page(sg);
-
-               kaddr = kmap(page);
-               if (direction == DMA_TO_DEVICE)
-                       rc = copy_from_user(kaddr, buffer, bsize_elem);
-               else
-                       rc = copy_to_user(buffer, kaddr, bsize_elem);
-
-               kunmap(page);
-
-               if (rc) {
-                       pmcraid_err("failed to copy user data into sg list\n");
-                       return -EFAULT;
-               }
-
-               sg->length = bsize_elem;
-       }
-
-       if (len % bsize_elem) {
-               struct page *page = sg_page(sg);
-
-               kaddr = kmap(page);
-
-               if (direction == DMA_TO_DEVICE)
-                       rc = copy_from_user(kaddr, buffer, len % bsize_elem);
-               else
-                       rc = copy_to_user(buffer, kaddr, len % bsize_elem);
-
-               kunmap(page);
-
-               sg->length = len % bsize_elem;
-       }
-
-       if (rc) {
-               pmcraid_err("failed to copy user data into sg list\n");
-               rc = -EFAULT;
-       }
-
-       return rc;
-}
-
 /**
  * pmcraid_queuecommand_lck - Queue a mid-layer request
  * @scsi_cmd: scsi command struct
@@ -3454,365 +3336,6 @@ static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
        return rc;
 }
 
-
-/**
- * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough
- * commands sent over IOCTL interface
- *
- * @cmd       : pointer to struct pmcraid_cmd
- * @buflen    : length of the request buffer
- * @direction : data transfer direction
- *
- * Return value
- *  0 on success, non-zero error code on failure
- */
-static int pmcraid_build_passthrough_ioadls(
-       struct pmcraid_cmd *cmd,
-       int buflen,
-       int direction
-)
-{
-       struct pmcraid_sglist *sglist = NULL;
-       struct scatterlist *sg = NULL;
-       struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
-       struct pmcraid_ioadl_desc *ioadl;
-       int i;
-
-       sglist = pmcraid_alloc_sglist(buflen);
-
-       if (!sglist) {
-               pmcraid_err("can't allocate memory for passthrough SGls\n");
-               return -ENOMEM;
-       }
-
-       sglist->num_dma_sg = dma_map_sg(&cmd->drv_inst->pdev->dev,
-                                       sglist->scatterlist,
-                                       sglist->num_sg, direction);
-
-       if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) {
-               dev_err(&cmd->drv_inst->pdev->dev,
-                       "Failed to map passthrough buffer!\n");
-               pmcraid_free_sglist(sglist);
-               return -EIO;
-       }
-
-       cmd->sglist = sglist;
-       ioarcb->request_flags0 |= NO_LINK_DESCS;
-
-       ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg);
-
-       /* Initialize IOADL descriptor addresses */
-       for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) {
-               ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg));
-               ioadl[i].address = cpu_to_le64(sg_dma_address(sg));
-               ioadl[i].flags = 0;
-       }
-
-       /* setup the last descriptor */
-       ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
-
-       return 0;
-}
-
-
-/**
- * pmcraid_release_passthrough_ioadls - release passthrough ioadls
- *
- * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated
- * @buflen: size of the request buffer
- * @direction: data transfer direction
- *
- * Return value
- *  0 on success, non-zero error code on failure
- */
-static void pmcraid_release_passthrough_ioadls(
-       struct pmcraid_cmd *cmd,
-       int buflen,
-       int direction
-)
-{
-       struct pmcraid_sglist *sglist = cmd->sglist;
-
-       if (buflen > 0) {
-               dma_unmap_sg(&cmd->drv_inst->pdev->dev,
-                            sglist->scatterlist,
-                            sglist->num_sg,
-                            direction);
-               pmcraid_free_sglist(sglist);
-               cmd->sglist = NULL;
-       }
-}
-
-/**
- * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
- *
- * @pinstance: pointer to adapter instance structure
- * @ioctl_cmd: ioctl code
- * @buflen: unused
- * @arg: pointer to pmcraid_passthrough_buffer user buffer
- *
- * Return value
- *  0 on success, non-zero error code on failure
- */
-static long pmcraid_ioctl_passthrough(
-       struct pmcraid_instance *pinstance,
-       unsigned int ioctl_cmd,
-       unsigned int buflen,
-       void __user *arg
-)
-{
-       struct pmcraid_passthrough_ioctl_buffer *buffer;
-       struct pmcraid_ioarcb *ioarcb;
-       struct pmcraid_cmd *cmd;
-       struct pmcraid_cmd *cancel_cmd;
-       void __user *request_buffer;
-       unsigned long request_offset;
-       unsigned long lock_flags;
-       void __user *ioasa;
-       u32 ioasc;
-       int request_size;
-       int buffer_size;
-       u8 direction;
-       int rc = 0;
-
-       /* If IOA reset is in progress, wait 10 secs for reset to complete */
-       if (pinstance->ioa_reset_in_progress) {
-               rc = wait_event_interruptible_timeout(
-                               pinstance->reset_wait_q,
-                               !pinstance->ioa_reset_in_progress,
-                               msecs_to_jiffies(10000));
-
-               if (!rc)
-                       return -ETIMEDOUT;
-               else if (rc < 0)
-                       return -ERESTARTSYS;
-       }
-
-       /* If adapter is not in operational state, return error */
-       if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) {
-               pmcraid_err("IOA is not operational\n");
-               return -ENOTTY;
-       }
-
-       buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer);
-       buffer = kmalloc(buffer_size, GFP_KERNEL);
-
-       if (!buffer) {
-               pmcraid_err("no memory for passthrough buffer\n");
-               return -ENOMEM;
-       }
-
-       request_offset =
-           offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer);
-
-       request_buffer = arg + request_offset;
-
-       rc = copy_from_user(buffer, arg,
-                            sizeof(struct pmcraid_passthrough_ioctl_buffer));
-
-       ioasa = arg + offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa);
-
-       if (rc) {
-               pmcraid_err("ioctl: can't copy passthrough buffer\n");
-               rc = -EFAULT;
-               goto out_free_buffer;
-       }
-
-       request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length);
-
-       if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
-               direction = DMA_TO_DEVICE;
-       } else {
-               direction = DMA_FROM_DEVICE;
-       }
-
-       if (request_size < 0) {
-               rc = -EINVAL;
-               goto out_free_buffer;
-       }
-
-       /* check if we have any additional command parameters */
-       if (le16_to_cpu(buffer->ioarcb.add_cmd_param_length)
-            > PMCRAID_ADD_CMD_PARAM_LEN) {
-               rc = -EINVAL;
-               goto out_free_buffer;
-       }
-
-       cmd = pmcraid_get_free_cmd(pinstance);
-
-       if (!cmd) {
-               pmcraid_err("free command block is not available\n");
-               rc = -ENOMEM;
-               goto out_free_buffer;
-       }
-
-       cmd->scsi_cmd = NULL;
-       ioarcb = &(cmd->ioa_cb->ioarcb);
-
-       /* Copy the user-provided IOARCB stuff field by field */
-       ioarcb->resource_handle = buffer->ioarcb.resource_handle;
-       ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length;
-       ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout;
-       ioarcb->request_type = buffer->ioarcb.request_type;
-       ioarcb->request_flags0 = buffer->ioarcb.request_flags0;
-       ioarcb->request_flags1 = buffer->ioarcb.request_flags1;
-       memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN);
-
-       if (buffer->ioarcb.add_cmd_param_length) {
-               ioarcb->add_cmd_param_length =
-                       buffer->ioarcb.add_cmd_param_length;
-               ioarcb->add_cmd_param_offset =
-                       buffer->ioarcb.add_cmd_param_offset;
-               memcpy(ioarcb->add_data.u.add_cmd_params,
-                       buffer->ioarcb.add_data.u.add_cmd_params,
-                       le16_to_cpu(buffer->ioarcb.add_cmd_param_length));
-       }
-
-       /* set hrrq number where the IOA should respond to. Note that all cmds
-        * generated internally uses hrrq_id 0, exception to this is the cmd
-        * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
-        * hrrq_id assigned here in queuecommand
-        */
-       ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
-                         pinstance->num_hrrq;
-
-       if (request_size) {
-               rc = pmcraid_build_passthrough_ioadls(cmd,
-                                                     request_size,
-                                                     direction);
-               if (rc) {
-                       pmcraid_err("couldn't build passthrough ioadls\n");
-                       goto out_free_cmd;
-               }
-       }
-
-       /* If data is being written into the device, copy the data from user
-        * buffers
-        */
-       if (direction == DMA_TO_DEVICE && request_size > 0) {
-               rc = pmcraid_copy_sglist(cmd->sglist,
-                                        request_buffer,
-                                        request_size,
-                                        direction);
-               if (rc) {
-                       pmcraid_err("failed to copy user buffer\n");
-                       goto out_free_sglist;
-               }
-       }
-
-       /* passthrough ioctl is a blocking command so, put the user to sleep
-        * until timeout. Note that a timeout value of 0 means, do timeout.
-        */
-       cmd->cmd_done = pmcraid_internal_done;
-       init_completion(&cmd->wait_for_completion);
-       cmd->completion_req = 1;
-
-       pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
-                    le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
-                    cmd->ioa_cb->ioarcb.cdb[0],
-                    le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
-
-       spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
-       _pmcraid_fire_command(cmd);
-       spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
-
-       /* NOTE ! Remove the below line once abort_task is implemented
-        * in firmware. This line disables ioctl command timeout handling logic
-        * similar to IO command timeout handling, making ioctl commands to wait
-        * until the command completion regardless of timeout value specified in
-        * ioarcb
-        */
-       buffer->ioarcb.cmd_timeout = 0;
-
-       /* If command timeout is specified put caller to wait till that time,
-        * otherwise it would be blocking wait. If command gets timed out, it
-        * will be aborted.
-        */
-       if (buffer->ioarcb.cmd_timeout == 0) {
-               wait_for_completion(&cmd->wait_for_completion);
-       } else if (!wait_for_completion_timeout(
-                       &cmd->wait_for_completion,
-                       msecs_to_jiffies(le16_to_cpu(buffer->ioarcb.cmd_timeout) * 1000))) {
-
-               pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
-                       le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
-                       cmd->ioa_cb->ioarcb.cdb[0]);
-
-               spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
-               cancel_cmd = pmcraid_abort_cmd(cmd);
-               spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
-
-               if (cancel_cmd) {
-                       wait_for_completion(&cancel_cmd->wait_for_completion);
-                       ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
-                       pmcraid_return_cmd(cancel_cmd);
-
-                       /* if abort task couldn't find the command i.e it got
-                        * completed prior to aborting, return good completion.
-                        * if command got aborted successfully or there was IOA
-                        * reset due to abort task itself getting timedout then
-                        * return -ETIMEDOUT
-                        */
-                       if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
-                           PMCRAID_IOASC_SENSE_KEY(ioasc) == 0x00) {
-                               if (ioasc != PMCRAID_IOASC_GC_IOARCB_NOTFOUND)
-                                       rc = -ETIMEDOUT;
-                               goto out_handle_response;
-                       }
-               }
-
-               /* no command block for abort task or abort task failed to abort
-                * the IOARCB, then wait for 150 more seconds and initiate reset
-                * sequence after timeout
-                */
-               if (!wait_for_completion_timeout(
-                       &cmd->wait_for_completion,
-                       msecs_to_jiffies(150 * 1000))) {
-                       pmcraid_reset_bringup(cmd->drv_inst);
-                       rc = -ETIMEDOUT;
-               }
-       }
-
-out_handle_response:
-       /* copy entire IOASA buffer and return IOCTL success.
-        * If copying IOASA to user-buffer fails, return
-        * EFAULT
-        */
-       if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
-               sizeof(struct pmcraid_ioasa))) {
-               pmcraid_err("failed to copy ioasa buffer to user\n");
-               rc = -EFAULT;
-       }
-
-       /* If the data transfer was from device, copy the data onto user
-        * buffers
-        */
-       else if (direction == DMA_FROM_DEVICE && request_size > 0) {
-               rc = pmcraid_copy_sglist(cmd->sglist,
-                                        request_buffer,
-                                        request_size,
-                                        direction);
-               if (rc) {
-                       pmcraid_err("failed to copy user buffer\n");
-                       rc = -EFAULT;
-               }
-       }
-
-out_free_sglist:
-       pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
-
-out_free_cmd:
-       pmcraid_return_cmd(cmd);
-
-out_free_buffer:
-       kfree(buffer);
-
-       return rc;
-}
-
-
-
-
 /**
  * pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself
  *
@@ -3922,20 +3445,6 @@ static long pmcraid_chr_ioctl(
 
        switch (_IOC_TYPE(cmd)) {
 
-       case PMCRAID_PASSTHROUGH_IOCTL:
-               /* If ioctl code is to download microcode, we need to block
-                * mid-layer requests.
-                */
-               if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
-                       scsi_block_requests(pinstance->host);
-
-               retval = pmcraid_ioctl_passthrough(pinstance, cmd,
-                                                  hdr->buffer_length, argp);
-
-               if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
-                       scsi_unblock_requests(pinstance->host);
-               break;
-
        case PMCRAID_DRIVER_IOCTL:
                arg += sizeof(struct pmcraid_ioctl_header);
                retval = pmcraid_ioctl_driver(pinstance, cmd,
index bbb75318f1e7fb6067d3b19ab4dbe08cd425d6a8..9f59930e8b4fdd1052f80c3d8fc5f7426fb1435c 100644
@@ -1022,41 +1022,16 @@ struct pmcraid_ioctl_header {
 
 #define PMCRAID_IOCTL_SIGNATURE      "PMCRAID"
 
-/*
- * pmcraid_passthrough_ioctl_buffer - structure given as argument to
- * passthrough(or firmware handled) IOCTL commands. Note that ioarcb requires
- * 32-byte alignment so, it is necessary to pack this structure to avoid any
- * holes between ioctl_header and passthrough buffer
- *
- * .ioactl_header : ioctl header
- * .ioarcb        : filled-up ioarcb buffer, driver always reads this buffer
- * .ioasa         : buffer for ioasa, driver fills this with IOASA from firmware
- * .request_buffer: The I/O buffer (flat), driver reads/writes to this based on
- *                  the transfer directions passed in ioarcb.flags0. Contents
- *                  of this buffer are valid only when ioarcb.data_transfer_len
- *                  is not zero.
- */
-struct pmcraid_passthrough_ioctl_buffer {
-       struct pmcraid_ioctl_header ioctl_header;
-       struct pmcraid_ioarcb ioarcb;
-       struct pmcraid_ioasa  ioasa;
-       u8  request_buffer[];
-} __attribute__ ((packed, aligned(PMCRAID_IOARCB_ALIGNMENT)));
-
 /*
  * keys to differentiate between driver handled IOCTLs and passthrough
  * IOCTLs passed to IOA. driver determines the ioctl type using macro
  * _IOC_TYPE
  */
 #define PMCRAID_DRIVER_IOCTL         'D'
-#define PMCRAID_PASSTHROUGH_IOCTL    'F'
 
 #define DRV_IOCTL(n, size) \
        _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
 
-#define FMW_IOCTL(n, size) \
-       _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL,  (n), (size))
-
 /*
  * _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd.
  * This is to facilitate applications avoiding un-necessary memory allocations.
@@ -1069,12 +1044,4 @@ struct pmcraid_passthrough_ioctl_buffer {
 #define PMCRAID_IOCTL_RESET_ADAPTER          \
        DRV_IOCTL(5, sizeof(struct pmcraid_ioctl_header))
 
-/* passthrough/firmware handled commands */
-#define PMCRAID_IOCTL_PASSTHROUGH_COMMAND         \
-       FMW_IOCTL(1, sizeof(struct pmcraid_passthrough_ioctl_buffer))
-
-#define PMCRAID_IOCTL_DOWNLOAD_MICROCODE     \
-       FMW_IOCTL(2, sizeof(struct pmcraid_passthrough_ioctl_buffer))
-
-
 #endif /* _PMCRAID_H */
index 8196f89f404e724a53bb294f14914a71b3594b70..31ec429104e2bfa427c3b16abfe37765dcdd5046 100644
@@ -860,6 +860,37 @@ static int qedi_task_xmit(struct iscsi_task *task)
        return qedi_iscsi_send_ioreq(task);
 }
 
+static void qedi_offload_work(struct work_struct *work)
+{
+       struct qedi_endpoint *qedi_ep =
+               container_of(work, struct qedi_endpoint, offload_work);
+       struct qedi_ctx *qedi;
+       int wait_delay = 5 * HZ;
+       int ret;
+
+       qedi = qedi_ep->qedi;
+
+       ret = qedi_iscsi_offload_conn(qedi_ep);
+       if (ret) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+                        qedi_ep->iscsi_cid, qedi_ep, ret);
+               qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+               return;
+       }
+
+       ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+                                              (qedi_ep->state ==
+                                              EP_STATE_OFLDCONN_COMPL),
+                                              wait_delay);
+       if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
+               qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+                        qedi_ep->iscsi_cid, qedi_ep);
+       }
+}
+
 static struct iscsi_endpoint *
 qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
                int non_blocking)
@@ -908,6 +939,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
        }
        qedi_ep = ep->dd_data;
        memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
+       INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
        qedi_ep->state = EP_STATE_IDLE;
        qedi_ep->iscsi_cid = (u32)-1;
        qedi_ep->qedi = qedi;
@@ -1056,12 +1088,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
        qedi_ep = ep->dd_data;
        qedi = qedi_ep->qedi;
 
+       flush_work(&qedi_ep->offload_work);
+
        if (qedi_ep->state == EP_STATE_OFLDCONN_START)
                goto ep_exit_recover;
 
-       if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
-               flush_work(&qedi_ep->offload_work);
-
        if (qedi_ep->conn) {
                qedi_conn = qedi_ep->conn;
                abrt_conn = qedi_conn->abrt_conn;
@@ -1235,37 +1266,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
        return rc;
 }
 
-static void qedi_offload_work(struct work_struct *work)
-{
-       struct qedi_endpoint *qedi_ep =
-               container_of(work, struct qedi_endpoint, offload_work);
-       struct qedi_ctx *qedi;
-       int wait_delay = 5 * HZ;
-       int ret;
-
-       qedi = qedi_ep->qedi;
-
-       ret = qedi_iscsi_offload_conn(qedi_ep);
-       if (ret) {
-               QEDI_ERR(&qedi->dbg_ctx,
-                        "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
-                        qedi_ep->iscsi_cid, qedi_ep, ret);
-               qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
-               return;
-       }
-
-       ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
-                                              (qedi_ep->state ==
-                                              EP_STATE_OFLDCONN_COMPL),
-                                              wait_delay);
-       if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
-               qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
-               QEDI_ERR(&qedi->dbg_ctx,
-                        "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
-                        qedi_ep->iscsi_cid, qedi_ep);
-       }
-}
-
 static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
 {
        struct qedi_ctx *qedi;
@@ -1381,7 +1381,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
                          qedi_ep->dst_addr, qedi_ep->dst_port);
        }
 
-       INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
        queue_work(qedi->offload_thread, &qedi_ep->offload_work);
 
        ret = 0;
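
The qedi hunks fix an init/flush race: qedi_offload_work() itself is unchanged (only moved up the file), but INIT_WORK() now happens at endpoint creation in qedi_ep_connect() rather than in qedi_set_path(), so qedi_ep_disconnect() can unconditionally flush_work() without ever flushing a work item that was never initialized. The ordering invariant, modeled minimally:

    #include <stdio.h>

    /* Toy model: a work item must be initialized before anyone may flush it. */
    struct work { int initialized; };

    static void init_work(struct work *w) { w->initialized = 1; }

    static void flush_work_sim(struct work *w)
    {
        if (!w->initialized) {
            puts("BUG: flushing uninitialized work");   /* the old race */
            return;
        }
        puts("work flushed safely");
    }

    int main(void)
    {
        struct work w = { 0 };

        init_work(&w);          /* patch: done at ep_connect time, */
        flush_work_sim(&w);     /* so disconnect can always flush  */
        return 0;
    }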
index c607755cce00d84897f17a8b3027b3309ae1810a..592a290e6cfaad1d7e0f033c8e941040254aa861 100644
@@ -32,7 +32,6 @@
 #include <linux/blkdev.h>
 #include <linux/crc-t10dif.h>
 #include <linux/spinlock.h>
-#include <linux/mutex.h>
 #include <linux/interrupt.h>
 #include <linux/atomic.h>
 #include <linux/hrtimer.h>
@@ -732,9 +731,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
            {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 };
 
-static atomic_t sdebug_num_hosts;
-static DEFINE_MUTEX(add_host_mutex);
-
+static int sdebug_num_hosts;
 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
 static int sdebug_ato = DEF_ATO;
 static int sdebug_cdb_len = DEF_CDB_LEN;
@@ -781,7 +778,6 @@ static int sdebug_uuid_ctl = DEF_UUID_CTL;
 static bool sdebug_random = DEF_RANDOM;
 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
 static bool sdebug_removable = DEF_REMOVABLE;
-static bool sdebug_deflect_incoming;
 static bool sdebug_clustering;
 static bool sdebug_host_lock = DEF_HOST_LOCK;
 static bool sdebug_strict = DEF_STRICT;
@@ -5122,10 +5118,6 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
                       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
        if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
                sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
-       if (smp_load_acquire(&sdebug_deflect_incoming)) {
-               pr_info("Exit early due to deflect_incoming\n");
-               return 1;
-       }
        if (devip == NULL) {
                devip = find_build_dev_info(sdp);
                if (devip == NULL)
@@ -5211,7 +5203,7 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
 }
 
 /* Deletes (stops) timers or work queues of all queued commands */
-static void stop_all_queued(bool done_with_no_conn)
+static void stop_all_queued(void)
 {
        unsigned long iflags;
        int j, k;
@@ -5220,15 +5212,13 @@ static void stop_all_queued(bool done_with_no_conn)
        struct sdebug_queued_cmd *sqcp;
        struct sdebug_dev_info *devip;
        struct sdebug_defer *sd_dp;
-       struct scsi_cmnd *scp;
 
        for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
                spin_lock_irqsave(&sqp->qc_lock, iflags);
                for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
                        if (test_bit(k, sqp->in_use_bm)) {
                                sqcp = &sqp->qc_arr[k];
-                               scp = sqcp->a_cmnd;
-                               if (!scp)
+                               if (sqcp->a_cmnd == NULL)
                                        continue;
                                devip = (struct sdebug_dev_info *)
                                        sqcp->a_cmnd->device->hostdata;
@@ -5243,10 +5233,6 @@ static void stop_all_queued(bool done_with_no_conn)
                                        l_defer_t = SDEB_DEFER_NONE;
                                spin_unlock_irqrestore(&sqp->qc_lock, iflags);
                                stop_qc_helper(sd_dp, l_defer_t);
-                               if (done_with_no_conn && l_defer_t != SDEB_DEFER_NONE) {
-                                       scp->result = DID_NO_CONNECT << 16;
-                                       scsi_done(scp);
-                               }
                                clear_bit(k, sqp->in_use_bm);
                                spin_lock_irqsave(&sqp->qc_lock, iflags);
                        }
@@ -5389,7 +5375,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
                }
        }
        spin_unlock(&sdebug_host_list_lock);
-       stop_all_queued(false);
+       stop_all_queued();
        if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
                sdev_printk(KERN_INFO, SCpnt->device,
                            "%s: %d device(s) found\n", __func__, k);
@@ -5449,50 +5435,13 @@ static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
        }
 }
 
-static void sdeb_block_all_queues(void)
-{
-       int j;
-       struct sdebug_queue *sqp;
-
-       for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
-               atomic_set(&sqp->blocked, (int)true);
-}
-
-static void sdeb_unblock_all_queues(void)
+static void block_unblock_all_queues(bool block)
 {
        int j;
        struct sdebug_queue *sqp;
 
        for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
-               atomic_set(&sqp->blocked, (int)false);
-}
-
-static void
-sdeb_add_n_hosts(int num_hosts)
-{
-       if (num_hosts < 1)
-               return;
-       do {
-               bool found;
-               unsigned long idx;
-               struct sdeb_store_info *sip;
-               bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
-
-               found = false;
-               if (want_phs) {
-                       xa_for_each_marked(per_store_ap, idx, sip, SDEB_XA_NOT_IN_USE) {
-                               sdeb_most_recent_idx = (int)idx;
-                               found = true;
-                               break;
-                       }
-                       if (found)      /* re-use case */
-                               sdebug_add_host_helper((int)idx);
-                       else
-                               sdebug_do_add_host(true /* make new store */);
-               } else {
-                       sdebug_do_add_host(false);
-               }
-       } while (--num_hosts);
+               atomic_set(&sqp->blocked, (int)block);
 }
 
 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
@@ -5505,10 +5454,10 @@ static void tweak_cmnd_count(void)
        modulo = abs(sdebug_every_nth);
        if (modulo < 2)
                return;
-       sdeb_block_all_queues();
+       block_unblock_all_queues(true);
        count = atomic_read(&sdebug_cmnd_count);
        atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
-       sdeb_unblock_all_queues();
+       block_unblock_all_queues(false);
 }
 
 static void clear_queue_stats(void)
@@ -5526,15 +5475,6 @@ static bool inject_on_this_cmd(void)
        return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
 }
 
-static int process_deflect_incoming(struct scsi_cmnd *scp)
-{
-       u8 opcode = scp->cmnd[0];
-
-       if (opcode == SYNCHRONIZE_CACHE || opcode == SYNCHRONIZE_CACHE_16)
-               return 0;
-       return DID_NO_CONNECT << 16;
-}
-
 #define INCLUSIVE_TIMING_MAX_NS 1000000                /* 1 millisecond */
 
 /* Complete the processing of the thread that queued a SCSI command to this
@@ -5544,7 +5484,8 @@ static int process_deflect_incoming(struct scsi_cmnd *scp)
  */
 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
                         int scsi_result,
-                        int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *),
+                        int (*pfp)(struct scsi_cmnd *,
+                                   struct sdebug_dev_info *),
                         int delta_jiff, int ndelay)
 {
        bool new_sd_dp;
@@ -5565,27 +5506,13 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
        }
        sdp = cmnd->device;
 
-       if (delta_jiff == 0) {
-               sqp = get_queue(cmnd);
-               if (atomic_read(&sqp->blocked)) {
-                       if (smp_load_acquire(&sdebug_deflect_incoming))
-                               return process_deflect_incoming(cmnd);
-                       else
-                               return SCSI_MLQUEUE_HOST_BUSY;
-               }
+       if (delta_jiff == 0)
                goto respond_in_thread;
-       }
 
        sqp = get_queue(cmnd);
        spin_lock_irqsave(&sqp->qc_lock, iflags);
        if (unlikely(atomic_read(&sqp->blocked))) {
                spin_unlock_irqrestore(&sqp->qc_lock, iflags);
-               if (smp_load_acquire(&sdebug_deflect_incoming)) {
-                       scsi_result = process_deflect_incoming(cmnd);
-                       goto respond_in_thread;
-               }
-               if (sdebug_verbose)
-                       pr_info("blocked --> SCSI_MLQUEUE_HOST_BUSY\n");
                return SCSI_MLQUEUE_HOST_BUSY;
        }
        num_in_q = atomic_read(&devip->num_in_q);
@@ -5774,12 +5701,8 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 respond_in_thread:     /* call back to mid-layer using invocation thread */
        cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
        cmnd->result &= ~SDEG_RES_IMMED_MASK;
-       if (cmnd->result == 0 && scsi_result != 0) {
+       if (cmnd->result == 0 && scsi_result != 0)
                cmnd->result = scsi_result;
-               if (sdebug_verbose)
-                       pr_info("respond_in_thread: tag=0x%x, scp->result=0x%x\n",
-                               blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)), scsi_result);
-       }
        scsi_done(cmnd);
        return 0;
 }
@@ -6064,7 +5987,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
                        int j, k;
                        struct sdebug_queue *sqp;
 
-                       sdeb_block_all_queues();
+                       block_unblock_all_queues(true);
                        for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
                             ++j, ++sqp) {
                                k = find_first_bit(sqp->in_use_bm,
@@ -6078,7 +6001,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
                                sdebug_jdelay = jdelay;
                                sdebug_ndelay = 0;
                        }
-                       sdeb_unblock_all_queues();
+                       block_unblock_all_queues(false);
                }
                return res;
        }
@@ -6104,7 +6027,7 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
                        int j, k;
                        struct sdebug_queue *sqp;
 
-                       sdeb_block_all_queues();
+                       block_unblock_all_queues(true);
                        for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
                             ++j, ++sqp) {
                                k = find_first_bit(sqp->in_use_bm,
@@ -6119,7 +6042,7 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
                                sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
                                                        : DEF_JDELAY;
                        }
-                       sdeb_unblock_all_queues();
+                       block_unblock_all_queues(false);
                }
                return res;
        }
@@ -6433,7 +6356,7 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
            (n <= SDEBUG_CANQUEUE) &&
            (sdebug_host_max_queue == 0)) {
-               sdeb_block_all_queues();
+               block_unblock_all_queues(true);
                k = 0;
                for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
                     ++j, ++sqp) {
@@ -6448,7 +6371,7 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
                        atomic_set(&retired_max_queue, k + 1);
                else
                        atomic_set(&retired_max_queue, 0);
-               sdeb_unblock_all_queues();
+               block_unblock_all_queues(false);
                return count;
        }
        return -EINVAL;
@@ -6537,48 +6460,43 @@ static DRIVER_ATTR_RW(virtual_gb);
 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
 {
        /* absolute number of hosts currently active is what is shown */
-       return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&sdebug_num_hosts));
+       return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
 }
 
-/*
- * Accept positive and negative values. Hex values (only positive) may be prefixed by '0x'.
- * To remove all hosts use a large negative number (e.g. -9999). The value 0 does nothing.
- * Returns -EBUSY if another add_host sysfs invocation is active.
- */
 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
                              size_t count)
 {
+       bool found;
+       unsigned long idx;
+       struct sdeb_store_info *sip;
+       bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
        int delta_hosts;
 
-       if (count == 0 || kstrtoint(buf, 0, &delta_hosts))
+       if (sscanf(buf, "%d", &delta_hosts) != 1)
                return -EINVAL;
-       if (sdebug_verbose)
-               pr_info("prior num_hosts=%d, num_to_add=%d\n",
-                       atomic_read(&sdebug_num_hosts), delta_hosts);
-       if (delta_hosts == 0)
-               return count;
-       if (mutex_trylock(&add_host_mutex) == 0)
-               return -EBUSY;
        if (delta_hosts > 0) {
-               sdeb_add_n_hosts(delta_hosts);
-       } else if (delta_hosts < 0) {
-               smp_store_release(&sdebug_deflect_incoming, true);
-               sdeb_block_all_queues();
-               if (delta_hosts >= atomic_read(&sdebug_num_hosts))
-                       stop_all_queued(true);
                do {
-                       if (atomic_read(&sdebug_num_hosts) < 1) {
-                               free_all_queued();
-                               break;
+                       found = false;
+                       if (want_phs) {
+                               xa_for_each_marked(per_store_ap, idx, sip,
+                                                  SDEB_XA_NOT_IN_USE) {
+                                       sdeb_most_recent_idx = (int)idx;
+                                       found = true;
+                                       break;
+                               }
+                               if (found)      /* re-use case */
+                                       sdebug_add_host_helper((int)idx);
+                               else
+                                       sdebug_do_add_host(true);
+                       } else {
+                               sdebug_do_add_host(false);
                        }
+               } while (--delta_hosts);
+       } else if (delta_hosts < 0) {
+               do {
                        sdebug_do_remove_host(false);
                } while (++delta_hosts);
-               sdeb_unblock_all_queues();
-               smp_store_release(&sdebug_deflect_incoming, false);
        }
-       mutex_unlock(&add_host_mutex);
-       if (sdebug_verbose)
-               pr_info("post num_hosts=%d\n", atomic_read(&sdebug_num_hosts));
        return count;
 }
 static DRIVER_ATTR_RW(add_host);
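
The reuse loop above is the standard xarray marked-iteration idiom: scan for an entry carrying a "free" mark before allocating a new one. A minimal sketch of the pattern, with xa, struct entry and make_new_entry() as illustrative placeholders rather than driver names:

	/* Sketch: prefer reclaiming a slot marked free over allocating. */
	struct entry *find_or_create(struct xarray *xa)
	{
		struct entry *e;
		unsigned long idx;

		xa_for_each_marked(xa, idx, e, XA_MARK_0) {
			xa_clear_mark(xa, idx, XA_MARK_0);	/* claim it */
			return e;
		}
		return make_new_entry(xa);	/* nothing marked; build fresh */
	}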
@@ -7089,10 +7007,6 @@ static int __init scsi_debug_init(void)
        sdebug_add_host = 0;
 
        for (k = 0; k < hosts_to_add; k++) {
-               if (smp_load_acquire(&sdebug_deflect_incoming)) {
-                       pr_info("exit early as sdebug_deflect_incoming is set\n");
-                       return 0;
-               }
                if (want_store && k == 0) {
                        ret = sdebug_add_host_helper(idx);
                        if (ret < 0) {
@@ -7110,12 +7024,8 @@ static int __init scsi_debug_init(void)
                }
        }
        if (sdebug_verbose)
-               pr_info("built %d host(s)\n", atomic_read(&sdebug_num_hosts));
+               pr_info("built %d host(s)\n", sdebug_num_hosts);
 
-       /*
-        * Even though all the hosts have been established, due to async device (LU) scanning
-        * by the scsi mid-level, there may still be devices (LUs) being set up.
-        */
        return 0;
 
 bus_unreg:
@@ -7131,17 +7041,12 @@ free_q_arr:
 
 static void __exit scsi_debug_exit(void)
 {
-       int k;
+       int k = sdebug_num_hosts;
 
-       /* Possible race with LUs still being set up; stop them asap */
-       sdeb_block_all_queues();
-       smp_store_release(&sdebug_deflect_incoming, true);
-       stop_all_queued(false);
-       for (k = 0; atomic_read(&sdebug_num_hosts) > 0; k++)
+       stop_all_queued();
+       for (; k; k--)
                sdebug_do_remove_host(true);
        free_all_queued();
-       if (sdebug_verbose)
-               pr_info("removed %d hosts\n", k);
        driver_unregister(&sdebug_driverfs_driver);
        bus_unregister(&pseudo_lld_bus);
        root_device_unregister(pseudo_primary);
@@ -7311,13 +7216,13 @@ static int sdebug_add_host_helper(int per_host_idx)
        sdbg_host->dev.bus = &pseudo_lld_bus;
        sdbg_host->dev.parent = pseudo_primary;
        sdbg_host->dev.release = &sdebug_release_adapter;
-       dev_set_name(&sdbg_host->dev, "adapter%d", atomic_read(&sdebug_num_hosts));
+       dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
 
        error = device_register(&sdbg_host->dev);
        if (error)
                goto clean;
 
-       atomic_inc(&sdebug_num_hosts);
+       ++sdebug_num_hosts;
        return 0;
 
 clean:
@@ -7381,7 +7286,7 @@ static void sdebug_do_remove_host(bool the_end)
                return;
 
        device_unregister(&sdbg_host->dev);
-       atomic_dec(&sdebug_num_hosts);
+       --sdebug_num_hosts;
 }
 
 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
@@ -7389,10 +7294,10 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
        int num_in_q = 0;
        struct sdebug_dev_info *devip;
 
-       sdeb_block_all_queues();
+       block_unblock_all_queues(true);
        devip = (struct sdebug_dev_info *)sdev->hostdata;
        if (NULL == devip) {
-               sdeb_unblock_all_queues();
+               block_unblock_all_queues(false);
                return  -ENODEV;
        }
        num_in_q = atomic_read(&devip->num_in_q);
@@ -7411,7 +7316,7 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
                sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
                            __func__, qdepth, num_in_q);
        }
-       sdeb_unblock_all_queues();
+       block_unblock_all_queues(false);
        return sdev->queue_depth;
 }
 
@@ -7519,12 +7424,13 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
        struct sdebug_defer *sd_dp;
 
        sqp = sdebug_q_arr + queue_num;
-       qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
-       if (qc_idx >= sdebug_max_queue)
-               return 0;
 
        spin_lock_irqsave(&sqp->qc_lock, iflags);
 
+       qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
+       if (qc_idx >= sdebug_max_queue)
+               goto unlock;
+
        for (first = true; first || qc_idx + 1 < sdebug_max_queue; )   {
                if (first) {
                        first = false;
@@ -7589,6 +7495,7 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
                        break;
        }
 
+unlock:
        spin_unlock_irqrestore(&sqp->qc_lock, iflags);
 
        if (num_entries > 0)
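
Note the shape of the poll fix above: the find_first_bit() scan now happens under qc_lock, closing the window in which the bitmap could change between an unlocked emptiness check and the locked processing. The general rule, as a sketch with illustrative names:

	/* Sketch: never test lock-protected state before taking its lock. */
	spin_lock_irqsave(&lock, flags);
	idx = find_first_bit(bitmap, nbits);
	if (idx >= nbits)
		goto unlock;		/* empty; exit via the single unlock */
	/* ... consume bitmap entries ... */
	unlock:
	spin_unlock_irqrestore(&lock, flags);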
index ff89de86545d1f4d8f9d3d050cca5ae419787aae..b02af340c2d3d15e4aa05ef84faafc596386f307 100644 (file)
@@ -30,7 +30,7 @@ static inline const char *scmd_name(const struct scsi_cmnd *scmd)
 {
        struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
 
-       if (!rq->q->disk)
+       if (!rq->q || !rq->q->disk)
                return NULL;
        return rq->q->disk->disk_name;
 }
index f4e6c68ac99eddad151716e36e075347ddb54d1d..2ef78083f1eff616ab727352365b9875506dca2a 100644 (file)
@@ -223,6 +223,8 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
        int ret;
        struct sbitmap sb_backup;
 
+       depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));
+
        /*
         * realloc if new shift is calculated, which is caused by setting
         * up one new default queue depth after calling ->slave_configure
@@ -245,6 +247,9 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
                                scsi_device_max_queue_depth(sdev),
                                new_shift, GFP_KERNEL,
                                sdev->request_queue->node, false, true);
+       if (!ret)
+               sbitmap_resize(&sdev->budget_map, depth);
+
        if (need_free) {
                if (ret)
                        sdev->budget_map = sb_backup;
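
The budget-map change above pairs an up-front clamp with a post-init resize: the requested depth is first bounded by the device limit, and once the bitmap has been rebuilt at the device maximum, sbitmap_resize() trims it back to the clamped depth. A reduced sketch, with max_depth() standing in for the device limit:

	/* Sketch: clamp the requested depth, then size the map to it. */
	depth = min_t(unsigned int, depth, max_depth(sdev));
	ret = sbitmap_init_node(&map, max_depth(sdev), new_shift, GFP_KERNEL,
				node, false, true);
	if (!ret)
		sbitmap_resize(&map, depth);	/* shrink to what was asked */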
index 226a50944c005c55a1b2b2ce76b40cc4089247cd..dc6872e352bd4ea09b5cf7c9c945c2631c1af355 100644 (file)
@@ -1384,10 +1384,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
        if (IS_ENABLED(CONFIG_BLK_DEV_BSG)) {
                sdev->bsg_dev = scsi_bsg_register_queue(sdev);
                if (IS_ERR(sdev->bsg_dev)) {
-                       /*
-                        * We're treating error on bsg register as non-fatal, so
-                        * pretend nothing went wrong.
-                        */
                        error = PTR_ERR(sdev->bsg_dev);
                        sdev_printk(KERN_INFO, sdev,
                                    "Failed to register bsg queue, errno=%d\n",
index 27951ea05dd419ae17138625bb5a8d480b93c6f4..2c0dd64159b09d4956b5b105fa3761839ec74f9b 100644 (file)
@@ -86,6 +86,9 @@ struct iscsi_internal {
        struct transport_container session_cont;
 };
 
+static DEFINE_IDR(iscsi_ep_idr);
+static DEFINE_MUTEX(iscsi_ep_idr_mutex);
+
 static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
 
 static struct workqueue_struct *iscsi_conn_cleanup_workq;
@@ -168,6 +171,11 @@ struct device_attribute dev_attr_##_prefix##_##_name =     \
 static void iscsi_endpoint_release(struct device *dev)
 {
        struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+
+       mutex_lock(&iscsi_ep_idr_mutex);
+       idr_remove(&iscsi_ep_idr, ep->id);
+       mutex_unlock(&iscsi_ep_idr_mutex);
+
        kfree(ep);
 }
 
@@ -180,7 +188,7 @@ static ssize_t
 show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
-       return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id);
+       return sysfs_emit(buf, "%d\n", ep->id);
 }
 static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
 
@@ -193,48 +201,32 @@ static struct attribute_group iscsi_endpoint_group = {
        .attrs = iscsi_endpoint_attrs,
 };
 
-#define ISCSI_MAX_EPID -1
-
-static int iscsi_match_epid(struct device *dev, const void *data)
-{
-       struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
-       const uint64_t *epid = data;
-
-       return *epid == ep->id;
-}
-
 struct iscsi_endpoint *
 iscsi_create_endpoint(int dd_size)
 {
-       struct device *dev;
        struct iscsi_endpoint *ep;
-       uint64_t id;
-       int err;
-
-       for (id = 1; id < ISCSI_MAX_EPID; id++) {
-               dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
-                                       iscsi_match_epid);
-               if (!dev)
-                       break;
-               else
-                       put_device(dev);
-       }
-       if (id == ISCSI_MAX_EPID) {
-               printk(KERN_ERR "Too many connections. Max supported %u\n",
-                      ISCSI_MAX_EPID - 1);
-               return NULL;
-       }
+       int err, id;
 
        ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
        if (!ep)
                return NULL;
 
+       mutex_lock(&iscsi_ep_idr_mutex);
+       id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO);
+       if (id < 0) {
+               mutex_unlock(&iscsi_ep_idr_mutex);
+               printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
+                      id);
+               goto free_ep;
+       }
+       mutex_unlock(&iscsi_ep_idr_mutex);
+
        ep->id = id;
        ep->dev.class = &iscsi_endpoint_class;
-       dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
+       dev_set_name(&ep->dev, "ep-%d", id);
        err = device_register(&ep->dev);
         if (err)
-                goto free_ep;
+               goto free_id;
 
        err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
        if (err)
@@ -248,6 +240,10 @@ unregister_dev:
        device_unregister(&ep->dev);
        return NULL;
 
+free_id:
+       mutex_lock(&iscsi_ep_idr_mutex);
+       idr_remove(&iscsi_ep_idr, id);
+       mutex_unlock(&iscsi_ep_idr_mutex);
 free_ep:
        kfree(ep);
        return NULL;
@@ -275,14 +271,17 @@ EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
  */
 struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
 {
-       struct device *dev;
+       struct iscsi_endpoint *ep;
 
-       dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
-                               iscsi_match_epid);
-       if (!dev)
-               return NULL;
+       mutex_lock(&iscsi_ep_idr_mutex);
+       ep = idr_find(&iscsi_ep_idr, handle);
+       if (!ep)
+               goto unlock;
 
-       return iscsi_dev_to_endpoint(dev);
+       get_device(&ep->dev);
+unlock:
+       mutex_unlock(&iscsi_ep_idr_mutex);
+       return ep;
 }
 EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
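
Taken together, the endpoint changes replace an O(n) class_find_device() scan per lookup with an IDR keyed by endpoint ID; allocation, lookup and removal all serialize on one mutex, and the lookup pins the object before the lock drops. The lifecycle, reduced to a sketch (my_idr, my_lock and obj are illustrative):

	static DEFINE_IDR(my_idr);
	static DEFINE_MUTEX(my_lock);

	/* allocate: end <= 0 means "no upper bound" */
	mutex_lock(&my_lock);
	id = idr_alloc(&my_idr, obj, 0, -1, GFP_KERNEL);
	mutex_unlock(&my_lock);

	/* lookup: take a reference before releasing the lock */
	mutex_lock(&my_lock);
	obj = idr_find(&my_idr, id);
	if (obj)
		get_device(&obj->dev);
	mutex_unlock(&my_lock);

	/* teardown: drop the ID before the object itself goes away */
	mutex_lock(&my_lock);
	idr_remove(&my_idr, id);
	mutex_unlock(&my_lock);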
 
@@ -2202,10 +2201,10 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
 
        switch (flag) {
        case STOP_CONN_RECOVER:
-               conn->state = ISCSI_CONN_FAILED;
+               WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
                break;
        case STOP_CONN_TERM:
-               conn->state = ISCSI_CONN_DOWN;
+               WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
                break;
        default:
                iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n",
@@ -2217,6 +2216,49 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
        ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n");
 }
 
+static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
+{
+       struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
+       struct iscsi_endpoint *ep;
+
+       ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
+       WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
+
+       if (!conn->ep || !session->transport->ep_disconnect)
+               return;
+
+       ep = conn->ep;
+       conn->ep = NULL;
+
+       session->transport->unbind_conn(conn, is_active);
+       session->transport->ep_disconnect(ep);
+       ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
+}
+
+static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn,
+                                        struct iscsi_endpoint *ep,
+                                        bool is_active)
+{
+       /* Check if this was a conn error and the kernel took ownership */
+       spin_lock_irq(&conn->lock);
+       if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+               spin_unlock_irq(&conn->lock);
+               iscsi_ep_disconnect(conn, is_active);
+       } else {
+               spin_unlock_irq(&conn->lock);
+               ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
+               mutex_unlock(&conn->ep_mutex);
+
+               flush_work(&conn->cleanup_work);
+               /*
+                * Userspace is now done with the EP so we can release the ref
+                * iscsi_cleanup_conn_work_fn took.
+                */
+               iscsi_put_endpoint(ep);
+               mutex_lock(&conn->ep_mutex);
+       }
+}
+
 static int iscsi_if_stop_conn(struct iscsi_transport *transport,
                              struct iscsi_uevent *ev)
 {
@@ -2237,12 +2279,25 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
                cancel_work_sync(&conn->cleanup_work);
                iscsi_stop_conn(conn, flag);
        } else {
+               /*
+                * For offload, when iscsid is restarted it won't know about
+                * existing endpoints so it can't do an ep_disconnect. We clean
+                * it up here for userspace.
+                */
+               mutex_lock(&conn->ep_mutex);
+               if (conn->ep)
+                       iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
+               mutex_unlock(&conn->ep_mutex);
+
                /*
                 * Figure out if it was the kernel or userspace initiating this.
                 */
+               spin_lock_irq(&conn->lock);
                if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+                       spin_unlock_irq(&conn->lock);
                        iscsi_stop_conn(conn, flag);
                } else {
+                       spin_unlock_irq(&conn->lock);
                        ISCSI_DBG_TRANS_CONN(conn,
                                             "flush kernel conn cleanup.\n");
                        flush_work(&conn->cleanup_work);
@@ -2251,31 +2306,14 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
                 * Only clear for recovery to avoid extra cleanup runs during
                 * termination.
                 */
+               spin_lock_irq(&conn->lock);
                clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
+               spin_unlock_irq(&conn->lock);
        }
        ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n");
        return 0;
 }
 
-static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
-{
-       struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
-       struct iscsi_endpoint *ep;
-
-       ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
-       conn->state = ISCSI_CONN_FAILED;
-
-       if (!conn->ep || !session->transport->ep_disconnect)
-               return;
-
-       ep = conn->ep;
-       conn->ep = NULL;
-
-       session->transport->unbind_conn(conn, is_active);
-       session->transport->ep_disconnect(ep);
-       ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
-}
-
 static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
 {
        struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn,
@@ -2284,18 +2322,11 @@ static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
 
        mutex_lock(&conn->ep_mutex);
        /*
-        * If we are not at least bound there is nothing for us to do. Userspace
-        * will do a ep_disconnect call if offload is used, but will not be
-        * doing a stop since there is nothing to clean up, so we have to clear
-        * the cleanup bit here.
+        * Get a ref to the ep, so we don't release its ID until after
+        * userspace is done referencing it in iscsi_if_disconnect_bound_ep.
         */
-       if (conn->state != ISCSI_CONN_BOUND && conn->state != ISCSI_CONN_UP) {
-               ISCSI_DBG_TRANS_CONN(conn, "Got error while conn is already failed. Ignoring.\n");
-               clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
-               mutex_unlock(&conn->ep_mutex);
-               return;
-       }
-
+       if (conn->ep)
+               get_device(&conn->ep->dev);
        iscsi_ep_disconnect(conn, false);
 
        if (system_state != SYSTEM_RUNNING) {
@@ -2340,11 +2371,12 @@ iscsi_alloc_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
                conn->dd_data = &conn[1];
 
        mutex_init(&conn->ep_mutex);
+       spin_lock_init(&conn->lock);
        INIT_LIST_HEAD(&conn->conn_list);
        INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn);
        conn->transport = transport;
        conn->cid = cid;
-       conn->state = ISCSI_CONN_DOWN;
+       WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
 
        /* this is released in the dev's release function */
        if (!get_device(&session->dev))
@@ -2542,9 +2574,32 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
        struct iscsi_uevent *ev;
        struct iscsi_internal *priv;
        int len = nlmsg_total_size(sizeof(*ev));
+       unsigned long flags;
+       int state;
 
-       if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags))
-               queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work);
+       spin_lock_irqsave(&conn->lock, flags);
+       /*
+        * Userspace will only do a stop call if we are at least bound. And we
+        * only need to do the in-kernel cleanup if in the UP state so cmds can
+        * be released to the upper layers. If in other states, just wait for
+        * userspace to avoid races that can leave the cleanup_work queued.
+        */
+       state = READ_ONCE(conn->state);
+       switch (state) {
+       case ISCSI_CONN_BOUND:
+       case ISCSI_CONN_UP:
+               if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP,
+                                     &conn->flags)) {
+                       queue_work(iscsi_conn_cleanup_workq,
+                                  &conn->cleanup_work);
+               }
+               break;
+       default:
+               ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n",
+                                    state);
+               break;
+       }
+       spin_unlock_irqrestore(&conn->lock, flags);
 
        priv = iscsi_if_transport_lookup(conn->transport);
        if (!priv)
@@ -2894,7 +2949,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
        char *data = (char*)ev + sizeof(*ev);
        struct iscsi_cls_conn *conn;
        struct iscsi_cls_session *session;
-       int err = 0, value = 0;
+       int err = 0, value = 0, state;
 
        if (ev->u.set_param.len > PAGE_SIZE)
                return -EINVAL;
@@ -2911,8 +2966,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
                        session->recovery_tmo = value;
                break;
        default:
-               if ((conn->state == ISCSI_CONN_BOUND) ||
-                       (conn->state == ISCSI_CONN_UP)) {
+               state = READ_ONCE(conn->state);
+               if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) {
                        err = transport->set_param(conn, ev->u.set_param.param,
                                        data, ev->u.set_param.len);
                } else {
@@ -2984,16 +3039,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
        }
 
        mutex_lock(&conn->ep_mutex);
-       /* Check if this was a conn error and the kernel took ownership */
-       if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
-               ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
-               mutex_unlock(&conn->ep_mutex);
-
-               flush_work(&conn->cleanup_work);
-               goto put_ep;
-       }
-
-       iscsi_ep_disconnect(conn, false);
+       iscsi_if_disconnect_bound_ep(conn, ep, false);
        mutex_unlock(&conn->ep_mutex);
 put_ep:
        iscsi_put_endpoint(ep);
@@ -3696,24 +3742,17 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
                return -EINVAL;
 
        mutex_lock(&conn->ep_mutex);
+       spin_lock_irq(&conn->lock);
        if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+               spin_unlock_irq(&conn->lock);
                mutex_unlock(&conn->ep_mutex);
                ev->r.retcode = -ENOTCONN;
                return 0;
        }
+       spin_unlock_irq(&conn->lock);
 
        switch (nlh->nlmsg_type) {
        case ISCSI_UEVENT_BIND_CONN:
-               if (conn->ep) {
-                       /*
-                        * For offload boot support where iscsid is restarted
-                        * during the pivot root stage, the ep will be intact
-                        * here when the new iscsid instance starts up and
-                        * reconnects.
-                        */
-                       iscsi_ep_disconnect(conn, true);
-               }
-
                session = iscsi_session_lookup(ev->u.b_conn.sid);
                if (!session) {
                        err = -EINVAL;
@@ -3724,7 +3763,7 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
                                                ev->u.b_conn.transport_eph,
                                                ev->u.b_conn.is_leading);
                if (!ev->r.retcode)
-                       conn->state = ISCSI_CONN_BOUND;
+                       WRITE_ONCE(conn->state, ISCSI_CONN_BOUND);
 
                if (ev->r.retcode || !transport->ep_connect)
                        break;
@@ -3743,7 +3782,8 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
        case ISCSI_UEVENT_START_CONN:
                ev->r.retcode = transport->start_conn(conn);
                if (!ev->r.retcode)
-                       conn->state = ISCSI_CONN_UP;
+                       WRITE_ONCE(conn->state, ISCSI_CONN_UP);
+
                break;
        case ISCSI_UEVENT_SEND_PDU:
                pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
@@ -4050,10 +4090,11 @@ static ssize_t show_conn_state(struct device *dev,
 {
        struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);
        const char *state = "unknown";
+       int conn_state = READ_ONCE(conn->state);
 
-       if (conn->state >= 0 &&
-           conn->state < ARRAY_SIZE(connection_state_names))
-               state = connection_state_names[conn->state];
+       if (conn_state >= 0 &&
+           conn_state < ARRAY_SIZE(connection_state_names))
+               state = connection_state_names[conn_state];
 
        return sysfs_emit(buf, "%s\n", state);
 }
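
All reads and writes of conn->state now go through READ_ONCE()/WRITE_ONCE(), so lockless readers get one coherent load rather than a value the compiler may silently re-read. The show_conn_state() hunk also demonstrates the companion rule: snapshot once, then use the snapshot for every check.

	/* Sketch: one snapshot serves both the bounds check and the index. */
	int state = READ_ONCE(conn->state);

	if (state >= 0 && state < ARRAY_SIZE(names))
		return names[state];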
index a390679cf45848925889f43af87a8adc328b9c32..dc6e55761fd1f07239e60cd6f65c03ae47376cde 100644 (file)
@@ -3216,6 +3216,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
                        sd_read_block_limits(sdkp);
                        sd_read_block_characteristics(sdkp);
                        sd_zbc_read_zones(sdkp, buffer);
+                       sd_read_cpr(sdkp);
                }
 
                sd_print_capacity(sdkp, old_capacity);
@@ -3225,7 +3226,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
                sd_read_app_tag_own(sdkp, buffer);
                sd_read_write_same(sdkp, buffer);
                sd_read_security(sdkp, buffer);
-               sd_read_cpr(sdkp);
        }
 
        /*
@@ -3475,6 +3475,7 @@ static int sd_probe(struct device *dev)
        error = device_add_disk(dev, gd, NULL);
        if (error) {
                put_device(&sdkp->disk_dev);
+               blk_cleanup_disk(gd);
                goto out;
        }
 
index 5ba9df334968d6dc2ef284a99e4c1eb760003199..cbd92891a762c854fe35181fe1bb355dd94c776a 100644 (file)
@@ -535,7 +535,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 
        scsi_autopm_get_device(sdev);
 
-       if (ret != CDROMCLOSETRAY && ret != CDROMEJECT) {
+       if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) {
                ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
                if (ret != -ENOSYS)
                        goto put;
index 0d2e950d0865eb7f629e4d5c901fa252d38d44bb..586c0e567ff9abd9c51123cee8f0f15d36bed1c7 100644 (file)
@@ -957,18 +957,6 @@ static const struct reset_control_ops ufs_qcom_reset_ops = {
        .deassert = ufs_qcom_reset_deassert,
 };
 
-#define        ANDROID_BOOT_DEV_MAX    30
-static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
-
-#ifndef MODULE
-static int __init get_android_boot_dev(char *str)
-{
-       strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
-       return 1;
-}
-__setup("androidboot.bootdevice=", get_android_boot_dev);
-#endif
-
 /**
  * ufs_qcom_init - bind phy with controller
  * @hba: host controller instance
@@ -988,9 +976,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
        struct resource *res;
        struct ufs_clk_info *clki;
 
-       if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
-               return -ENODEV;
-
        host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
        if (!host) {
                err = -ENOMEM;
index f76692053ca17813a1d237fb35aed0e327a832ea..e892b9feffb11e6e7f3b5b3737dd828feaa85371 100644 (file)
@@ -428,6 +428,12 @@ static int ufs_intel_adl_init(struct ufs_hba *hba)
        return ufs_intel_common_init(hba);
 }
 
+static int ufs_intel_mtl_init(struct ufs_hba *hba)
+{
+       hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
+       return ufs_intel_common_init(hba);
+}
+
 static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_common_init,
@@ -465,6 +471,16 @@ static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
        .device_reset           = ufs_intel_device_reset,
 };
 
+static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = {
+       .name                   = "intel-pci",
+       .init                   = ufs_intel_mtl_init,
+       .exit                   = ufs_intel_common_exit,
+       .hce_enable_notify      = ufs_intel_hce_enable_notify,
+       .link_startup_notify    = ufs_intel_link_startup_notify,
+       .resume                 = ufs_intel_resume,
+       .device_reset           = ufs_intel_device_reset,
+};
+
 #ifdef CONFIG_PM_SLEEP
 static int ufshcd_pci_restore(struct device *dev)
 {
@@ -579,6 +595,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
        { PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
+       { PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
        { }     /* terminate list */
 };
 
index 88c20f3608c24b80635d689f2430d4c27d758a09..94f545be183aa13aa572b3ba12a90ee8bbf6b980 100644 (file)
@@ -820,8 +820,6 @@ struct ufs_hba {
        enum ufs_pm_level rpm_lvl;
        /* Desired UFS power management level during system PM */
        enum ufs_pm_level spm_lvl;
-       struct device_attribute rpm_lvl_attr;
-       struct device_attribute spm_lvl_attr;
        int pm_op_in_progress;
 
        /* Auto-Hibernate Idle Timer register value */
index b2bec19022cdd31868fc5047aedb5f45d7c34e6c..81099b68bbfbd13c8b1226d76e5c2482a37e0553 100644 (file)
@@ -867,12 +867,6 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
        struct ufshpb_region *rgn, *victim_rgn = NULL;
 
        list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
-               if (!rgn) {
-                       dev_err(&hpb->sdev_ufs_lu->sdev_dev,
-                               "%s: no region allocated\n",
-                               __func__);
-                       return NULL;
-               }
                if (ufshpb_check_srgns_issue_state(hpb, rgn))
                        continue;
 
@@ -888,6 +882,11 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
                break;
        }
 
+       if (!victim_rgn)
+               dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+                       "%s: no region allocated\n",
+                       __func__);
+
        return victim_rgn;
 }
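
The removed ufshpb check could never fire: the cursor of list_for_each_entry() is never NULL inside the loop body, so "no region found" has to be detected after the loop, which is exactly where the dev_err() moves. The idiom, sketched with illustrative names:

	/* Sketch: list_for_each_entry()'s cursor is never NULL in-loop. */
	struct item *pick_victim(struct list_head *head)
	{
		struct item *it, *found = NULL;

		list_for_each_entry(it, head, node) {
			if (usable(it)) {	/* usable() is a placeholder */
				found = it;
				break;
			}
		}
		return found;	/* NULL only if nothing qualified */
	}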
 
index 0e6110da69e7646f3b4b01cee646bad26222d328..578c4b6d0f7d97b1caddb46d8e0baef6f0b21822 100644 (file)
@@ -988,7 +988,7 @@ static struct virtio_driver virtio_scsi_driver = {
        .remove = virtscsi_remove,
 };
 
-static int __init init(void)
+static int __init virtio_scsi_init(void)
 {
        int ret = -ENOMEM;
 
@@ -1020,14 +1020,14 @@ error:
        return ret;
 }
 
-static void __exit fini(void)
+static void __exit virtio_scsi_fini(void)
 {
        unregister_virtio_driver(&virtio_scsi_driver);
        mempool_destroy(virtscsi_cmd_pool);
        kmem_cache_destroy(virtscsi_cmd_cache);
 }
-module_init(init);
-module_exit(fini);
+module_init(virtio_scsi_init);
+module_exit(virtio_scsi_fini);
 
 MODULE_DEVICE_TABLE(virtio, id_table);
 MODULE_DESCRIPTION("Virtio SCSI HBA driver");
index 27b9e2baab1a61c2ca62b7caf0f9ced69ee0025c..7acf9193a9e800519f6381b8ef27b201bf34650d 100644 (file)
@@ -159,6 +159,8 @@ static void zorro7xx_remove_one(struct zorro_dev *z)
        scsi_remove_host(host);
 
        NCR_700_release(host);
+       if (host->base > 0x01000000)
+               iounmap(hostdata->base);
        kfree(hostdata);
        free_irq(host->irq, host);
        zorro_release_device(z);
index 92d9610df1fd8fb50857839a5a1041bd9c80be08..938017a60c8ed2ae54ed01ecb5edad7a2e1b83b7 100644 (file)
@@ -277,6 +277,9 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
 static bool atmel_qspi_supports_op(struct spi_mem *mem,
                                   const struct spi_mem_op *op)
 {
+       if (!spi_mem_default_supports_op(mem, op))
+               return false;
+
        if (atmel_qspi_find_mode(op) < 0)
                return false;
 
index 86c76211b3d3dd4f735cd788d97159514a8a1250..cad2d55dcd3d2b7c05642461a2fe8f23fbae28f9 100644 (file)
@@ -1205,7 +1205,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
        addr = op->addr.val;
        len = op->data.nbytes;
 
-       if (bcm_qspi_bspi_ver_three(qspi) == true) {
+       if (has_bspi(qspi) && bcm_qspi_bspi_ver_three(qspi) == true) {
                /*
                 * The address coming into this function is a raw flash offset.
                 * But for BSPI <= V3, we need to convert it to a remapped BSPI
@@ -1224,7 +1224,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
            len < 4)
                mspi_read = true;
 
-       if (mspi_read)
+       if (!has_bspi(qspi) || mspi_read)
                return bcm_qspi_mspi_exec_mem_op(spi, op);
 
        ret = bcm_qspi_bspi_set_mode(qspi, op, 0);
index b0c9f62ccefbb9eaa83aa9d9897fa8d9c0a6148a..19686fb47bb352727fc9f2038403dd300909469a 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/iopoll.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
+#include <linux/log2.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/of.h>
@@ -102,12 +103,6 @@ struct cqspi_driver_platdata {
 #define CQSPI_TIMEOUT_MS                       500
 #define CQSPI_READ_TIMEOUT_MS                  10
 
-/* Instruction type */
-#define CQSPI_INST_TYPE_SINGLE                 0
-#define CQSPI_INST_TYPE_DUAL                   1
-#define CQSPI_INST_TYPE_QUAD                   2
-#define CQSPI_INST_TYPE_OCTAL                  3
-
 #define CQSPI_DUMMY_CLKS_PER_BYTE              8
 #define CQSPI_DUMMY_BYTES_MAX                  4
 #define CQSPI_DUMMY_CLKS_MAX                   31
@@ -376,10 +371,6 @@ static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr)
 static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
                              const struct spi_mem_op *op)
 {
-       f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
-       f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
-       f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
-
        /*
         * For an op to be DTR, cmd phase along with every other non-empty
         * phase should have dtr field set to 1. If an op phase has zero
@@ -389,32 +380,23 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
                       (!op->addr.nbytes || op->addr.dtr) &&
                       (!op->data.nbytes || op->data.dtr);
 
-       switch (op->data.buswidth) {
-       case 0:
-               break;
-       case 1:
-               f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
-               break;
-       case 2:
-               f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
-               break;
-       case 4:
-               f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
-               break;
-       case 8:
-               f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
-               break;
-       default:
-               return -EINVAL;
-       }
+       f_pdata->inst_width = 0;
+       if (op->cmd.buswidth)
+               f_pdata->inst_width = ilog2(op->cmd.buswidth);
+
+       f_pdata->addr_width = 0;
+       if (op->addr.buswidth)
+               f_pdata->addr_width = ilog2(op->addr.buswidth);
+
+       f_pdata->data_width = 0;
+       if (op->data.buswidth)
+               f_pdata->data_width = ilog2(op->data.buswidth);
 
        /* Right now we only support 8-8-8 DTR mode. */
        if (f_pdata->dtr) {
                switch (op->cmd.buswidth) {
                case 0:
-                       break;
                case 8:
-                       f_pdata->inst_width = CQSPI_INST_TYPE_OCTAL;
                        break;
                default:
                        return -EINVAL;
@@ -422,9 +404,7 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
 
                switch (op->addr.buswidth) {
                case 0:
-                       break;
                case 8:
-                       f_pdata->addr_width = CQSPI_INST_TYPE_OCTAL;
                        break;
                default:
                        return -EINVAL;
@@ -432,9 +412,7 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
 
                switch (op->data.buswidth) {
                case 0:
-                       break;
                case 8:
-                       f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
                        break;
                default:
                        return -EINVAL;
@@ -1437,9 +1415,24 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem,
        all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
                    !op->data.dtr;
 
-       /* Mixed DTR modes not supported. */
-       if (!(all_true || all_false))
+       if (all_true) {
+               /* Right now we only support 8-8-8 DTR mode. */
+               if (op->cmd.nbytes && op->cmd.buswidth != 8)
+                       return false;
+               if (op->addr.nbytes && op->addr.buswidth != 8)
+                       return false;
+               if (op->data.nbytes && op->data.buswidth != 8)
+                       return false;
+       } else if (all_false) {
+               /* Only 1-1-X ops are supported without DTR */
+               if (op->cmd.nbytes && op->cmd.buswidth > 1)
+                       return false;
+               if (op->addr.nbytes && op->addr.buswidth > 1)
+                       return false;
+       } else {
+               /* Mixed DTR modes are not supported. */
                return false;
+       }
 
        return spi_mem_default_supports_op(mem, op);
 }
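
The cadence-quadspi rework rests on one observation: the controller encodes bus width 1/2/4/8 as 0/1/2/3 in its width fields, which is just ilog2() of the width, so the CQSPI_INST_TYPE_* lookup table collapses into arithmetic. A sketch of that mapping:

	/* Sketch: bus width 1/2/4/8 -> register field 0/1/2/3 via ilog2(). */
	#include <linux/log2.h>

	static u8 width_to_field(u8 buswidth)
	{
		return buswidth ? ilog2(buswidth) : 0;	/* 0: phase unused */
	}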
index a5ef7a526a7fc9e22d1bc9be094fb144a7d6411c..f6eec7a869b6a7a3a0c05aa3bb134102e50b394d 100644 (file)
@@ -72,6 +72,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
        { PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },
+       { PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
        { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
index 94fb09696677f14a53f1b24375c4d9429a518252..d167699a1a96bd7015a278f5345dd8e650e6e8ed 100644 (file)
@@ -960,7 +960,17 @@ static int __maybe_unused mtk_nor_suspend(struct device *dev)
 
 static int __maybe_unused mtk_nor_resume(struct device *dev)
 {
-       return pm_runtime_force_resume(dev);
+       struct spi_controller *ctlr = dev_get_drvdata(dev);
+       struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
+       int ret;
+
+       ret = pm_runtime_force_resume(dev);
+       if (ret)
+               return ret;
+
+       mtk_nor_init(sp);
+
+       return 0;
 }
 
 static const struct dev_pm_ops mtk_nor_pm_ops = {
index 55c092069301761336b520f01c861985d7d2bdcf..65be8e085ab8392c25029b2aebd521b7610193ae 100644 (file)
@@ -813,6 +813,7 @@ static int mxic_spi_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&pdev->dev, "spi_register_master failed\n");
                pm_runtime_disable(&pdev->dev);
+               mxic_spi_mem_ecc_remove(mxic);
        }
 
        return ret;
index fe82f3575df4f38694df791cefb7e1068c16d0ae..24ec1c83f379ceec3c224f5c008572b708982908 100644 (file)
@@ -158,14 +158,18 @@ static int rpcif_spi_probe(struct platform_device *pdev)
 
        error = rpcif_hw_init(rpc, false);
        if (error)
-               return error;
+               goto out_disable_rpm;
 
        error = spi_register_controller(ctlr);
        if (error) {
                dev_err(&pdev->dev, "spi_register_controller failed\n");
-               rpcif_disable_rpm(rpc);
+               goto out_disable_rpm;
        }
 
+       return 0;
+
+out_disable_rpm:
+       rpcif_disable_rpm(rpc);
        return error;
 }
 
index c4dd1200fe99166f81285cdd9a0d09bb4a211f01..2e6d6bbeb7842e5def216a589e670c813e5caf42 100644 (file)
@@ -1130,11 +1130,15 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
 
        if (ctlr->dma_tx)
                tx_dev = ctlr->dma_tx->device->dev;
+       else if (ctlr->dma_map_dev)
+               tx_dev = ctlr->dma_map_dev;
        else
                tx_dev = ctlr->dev.parent;
 
        if (ctlr->dma_rx)
                rx_dev = ctlr->dma_rx->device->dev;
+       else if (ctlr->dma_map_dev)
+               rx_dev = ctlr->dma_map_dev;
        else
                rx_dev = ctlr->dev.parent;
 
@@ -2406,7 +2410,8 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
                        } else {
                                struct acpi_device *adev;
 
-                               if (acpi_bus_get_device(parent_handle, &adev))
+                               adev = acpi_fetch_acpi_dev(parent_handle);
+                               if (!adev)
                                        return -ENODEV;
 
                                ctlr = acpi_spi_find_controller_by_adev(adev);
index d68611ef22f80f07d9f0a6dc18b4efcb927b6ac5..f056204c0fdb1ae7ae0b41784b0e8f7f3f2e64ec 100644 (file)
@@ -70,7 +70,7 @@ static int __nat25_add_pppoe_tag(struct sk_buff *skb, struct pppoe_tag *tag)
        struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
        int data_len;
 
-       data_len = tag->tag_len + TAG_HDR_LEN;
+       data_len = be16_to_cpu(tag->tag_len) + TAG_HDR_LEN;
        if (skb_tailroom(skb) < data_len)
                return -1;
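
The PPPoE tag fix above is an endianness bug: tag_len is a big-endian on-the-wire field, so using it raw on a little-endian host byte-swaps the length (a wire value of 0x0008, for instance, would be read as 0x0800, i.e. 2048). The rule, in miniature:

	/* Sketch: convert wire-order fields before host arithmetic. */
	int data_len = be16_to_cpu(tag->tag_len) + TAG_HDR_LEN;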
 
index 95d4ca50a605b7a4bd27f7846ac513b47f306dcf..fd7267baa707838a3eed720728390f1534cb4db2 100644 (file)
@@ -1821,6 +1821,7 @@ static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
        mutex_lock(&udev->cmdr_lock);
        page = xa_load(&udev->data_pages, dpi);
        if (likely(page)) {
+               get_page(page);
                mutex_unlock(&udev->cmdr_lock);
                return page;
        }
@@ -1877,6 +1878,7 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
                /* For the vmalloc()ed cmd area pages */
                addr = (void *)(unsigned long)info->mem[mi].addr + offset;
                page = vmalloc_to_page(addr);
+               get_page(page);
        } else {
                uint32_t dpi;
 
@@ -1887,7 +1889,6 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
                        return VM_FAULT_SIGBUS;
        }
 
-       get_page(page);
        vmf->page = page;
        return 0;
 }
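
The tcmu change makes every path that can hand a page to the fault core take its own reference: a .fault handler must return a page with an elevated refcount (the core drops it when the mapping goes away), and for the xarray-backed data pages the get_page() now happens under cmdr_lock so the page cannot be released between lookup and pinning. In outline:

	/* Sketch: .fault must return a refcounted page. lookup_page() is
	 * an illustrative placeholder, not a tcmu function. */
	static vm_fault_t my_fault(struct vm_fault *vmf)
	{
		struct page *page = lookup_page(vmf->pgoff);

		if (!page)
			return VM_FAULT_SIGBUS;
		get_page(page);		/* reference owned by the mapping */
		vmf->page = page;
		return 0;
	}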
index 8a69583777644cda1102c66fff3d9065f9cfcba6..3acc0f1857629cfd9ce92caec03bfbf70222ccc7 100644 (file)
@@ -436,31 +436,31 @@ static void mpc512x_psc_fifo_init(struct uart_port *port)
        out_be32(&FIFO_512x(port)->rximr, MPC512x_PSC_FIFO_ALARM);
 }
 
-static int mpc512x_psc_raw_rx_rdy(struct uart_port *port)
+static unsigned int mpc512x_psc_raw_rx_rdy(struct uart_port *port)
 {
        return !(in_be32(&FIFO_512x(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY);
 }
 
-static int mpc512x_psc_raw_tx_rdy(struct uart_port *port)
+static unsigned int mpc512x_psc_raw_tx_rdy(struct uart_port *port)
 {
        return !(in_be32(&FIFO_512x(port)->txsr) & MPC512x_PSC_FIFO_FULL);
 }
 
-static int mpc512x_psc_rx_rdy(struct uart_port *port)
+static unsigned int mpc512x_psc_rx_rdy(struct uart_port *port)
 {
        return in_be32(&FIFO_512x(port)->rxsr)
            & in_be32(&FIFO_512x(port)->rximr)
            & MPC512x_PSC_FIFO_ALARM;
 }
 
-static int mpc512x_psc_tx_rdy(struct uart_port *port)
+static unsigned int mpc512x_psc_tx_rdy(struct uart_port *port)
 {
        return in_be32(&FIFO_512x(port)->txsr)
            & in_be32(&FIFO_512x(port)->tximr)
            & MPC512x_PSC_FIFO_ALARM;
 }
 
-static int mpc512x_psc_tx_empty(struct uart_port *port)
+static unsigned int mpc512x_psc_tx_empty(struct uart_port *port)
 {
        return in_be32(&FIFO_512x(port)->txsr)
            & MPC512x_PSC_FIFO_EMPTY;
@@ -780,29 +780,29 @@ static void mpc5125_psc_fifo_init(struct uart_port *port)
        out_be32(&FIFO_5125(port)->rximr, MPC512x_PSC_FIFO_ALARM);
 }
 
-static int mpc5125_psc_raw_rx_rdy(struct uart_port *port)
+static unsigned int mpc5125_psc_raw_rx_rdy(struct uart_port *port)
 {
        return !(in_be32(&FIFO_5125(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY);
 }
 
-static int mpc5125_psc_raw_tx_rdy(struct uart_port *port)
+static unsigned int mpc5125_psc_raw_tx_rdy(struct uart_port *port)
 {
        return !(in_be32(&FIFO_5125(port)->txsr) & MPC512x_PSC_FIFO_FULL);
 }
 
-static int mpc5125_psc_rx_rdy(struct uart_port *port)
+static unsigned int mpc5125_psc_rx_rdy(struct uart_port *port)
 {
        return in_be32(&FIFO_5125(port)->rxsr) &
               in_be32(&FIFO_5125(port)->rximr) & MPC512x_PSC_FIFO_ALARM;
 }
 
-static int mpc5125_psc_tx_rdy(struct uart_port *port)
+static unsigned int mpc5125_psc_tx_rdy(struct uart_port *port)
 {
        return in_be32(&FIFO_5125(port)->txsr) &
               in_be32(&FIFO_5125(port)->tximr) & MPC512x_PSC_FIFO_ALARM;
 }
 
-static int mpc5125_psc_tx_empty(struct uart_port *port)
+static unsigned int mpc5125_psc_tx_empty(struct uart_port *port)
 {
        return in_be32(&FIFO_5125(port)->txsr) & MPC512x_PSC_FIFO_EMPTY;
 }
index 7d41dfe48adeeeb84faa6f56ad54d612cbe9f343..48c4dadb0c7c7b9f4aac321731ceb59004836748 100644 (file)
 
 #include "ifcvf_base.h"
 
-static inline u8 ifc_ioread8(u8 __iomem *addr)
-{
-       return ioread8(addr);
-}
-static inline u16 ifc_ioread16 (__le16 __iomem *addr)
+struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
 {
-       return ioread16(addr);
+       return container_of(hw, struct ifcvf_adapter, vf);
 }
 
-static inline u32 ifc_ioread32(__le32 __iomem *addr)
+u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector)
 {
-       return ioread32(addr);
-}
+       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
 
-static inline void ifc_iowrite8(u8 value, u8 __iomem *addr)
-{
-       iowrite8(value, addr);
-}
+       vp_iowrite16(qid, &cfg->queue_select);
+       vp_iowrite16(vector, &cfg->queue_msix_vector);
 
-static inline void ifc_iowrite16(u16 value, __le16 __iomem *addr)
-{
-       iowrite16(value, addr);
+       return vp_ioread16(&cfg->queue_msix_vector);
 }
 
-static inline void ifc_iowrite32(u32 value, __le32 __iomem *addr)
+u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
 {
-       iowrite32(value, addr);
-}
+       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
 
-static void ifc_iowrite64_twopart(u64 val,
-                                 __le32 __iomem *lo, __le32 __iomem *hi)
-{
-       ifc_iowrite32((u32)val, lo);
-       ifc_iowrite32(val >> 32, hi);
-}
+       cfg = hw->common_cfg;
+       vp_iowrite16(vector,  &cfg->msix_config);
 
-struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
-{
-       return container_of(hw, struct ifcvf_adapter, vf);
+       return vp_ioread16(&cfg->msix_config);
 }
 
 static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
@@ -158,15 +142,16 @@ next:
                return -EIO;
        }
 
-       hw->nr_vring = ifc_ioread16(&hw->common_cfg->num_queues);
+       hw->nr_vring = vp_ioread16(&hw->common_cfg->num_queues);
 
        for (i = 0; i < hw->nr_vring; i++) {
-               ifc_iowrite16(i, &hw->common_cfg->queue_select);
-               notify_off = ifc_ioread16(&hw->common_cfg->queue_notify_off);
+               vp_iowrite16(i, &hw->common_cfg->queue_select);
+               notify_off = vp_ioread16(&hw->common_cfg->queue_notify_off);
                hw->vring[i].notify_addr = hw->notify_base +
                        notify_off * hw->notify_off_multiplier;
                hw->vring[i].notify_pa = hw->notify_base_pa +
                        notify_off * hw->notify_off_multiplier;
+               hw->vring[i].irq = -EINVAL;
        }
 
        hw->lm_cfg = hw->base[IFCVF_LM_BAR];
@@ -176,17 +161,20 @@ next:
                  hw->common_cfg, hw->notify_base, hw->isr,
                  hw->dev_cfg, hw->notify_off_multiplier);
 
+       hw->vqs_reused_irq = -EINVAL;
+       hw->config_irq = -EINVAL;
+
        return 0;
 }
 
 u8 ifcvf_get_status(struct ifcvf_hw *hw)
 {
-       return ifc_ioread8(&hw->common_cfg->device_status);
+       return vp_ioread8(&hw->common_cfg->device_status);
 }
 
 void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
 {
-       ifc_iowrite8(status, &hw->common_cfg->device_status);
+       vp_iowrite8(status, &hw->common_cfg->device_status);
 }
 
 void ifcvf_reset(struct ifcvf_hw *hw)
@@ -214,11 +202,11 @@ u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
        u32 features_lo, features_hi;
        u64 features;
 
-       ifc_iowrite32(0, &cfg->device_feature_select);
-       features_lo = ifc_ioread32(&cfg->device_feature);
+       vp_iowrite32(0, &cfg->device_feature_select);
+       features_lo = vp_ioread32(&cfg->device_feature);
 
-       ifc_iowrite32(1, &cfg->device_feature_select);
-       features_hi = ifc_ioread32(&cfg->device_feature);
+       vp_iowrite32(1, &cfg->device_feature_select);
+       features_hi = vp_ioread32(&cfg->device_feature);
 
        features = ((u64)features_hi << 32) | features_lo;
 
@@ -271,12 +259,12 @@ void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
 
        WARN_ON(offset + length > hw->config_size);
        do {
-               old_gen = ifc_ioread8(&hw->common_cfg->config_generation);
+               old_gen = vp_ioread8(&hw->common_cfg->config_generation);
                p = dst;
                for (i = 0; i < length; i++)
-                       *p++ = ifc_ioread8(hw->dev_cfg + offset + i);
+                       *p++ = vp_ioread8(hw->dev_cfg + offset + i);
 
-               new_gen = ifc_ioread8(&hw->common_cfg->config_generation);
+               new_gen = vp_ioread8(&hw->common_cfg->config_generation);
        } while (old_gen != new_gen);
 }
 
@@ -289,18 +277,18 @@ void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
        p = src;
        WARN_ON(offset + length > hw->config_size);
        for (i = 0; i < length; i++)
-               ifc_iowrite8(*p++, hw->dev_cfg + offset + i);
+               vp_iowrite8(*p++, hw->dev_cfg + offset + i);
 }
 
 static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
 {
        struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
 
-       ifc_iowrite32(0, &cfg->guest_feature_select);
-       ifc_iowrite32((u32)features, &cfg->guest_feature);
+       vp_iowrite32(0, &cfg->guest_feature_select);
+       vp_iowrite32((u32)features, &cfg->guest_feature);
 
-       ifc_iowrite32(1, &cfg->guest_feature_select);
-       ifc_iowrite32(features >> 32, &cfg->guest_feature);
+       vp_iowrite32(1, &cfg->guest_feature_select);
+       vp_iowrite32(features >> 32, &cfg->guest_feature);
 }
 
 static int ifcvf_config_features(struct ifcvf_hw *hw)
@@ -329,7 +317,7 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
        ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
        q_pair_id = qid / hw->nr_vring;
        avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
-       last_avail_idx = ifc_ioread16(avail_idx_addr);
+       last_avail_idx = vp_ioread16(avail_idx_addr);
 
        return last_avail_idx;
 }
@@ -344,7 +332,7 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
        q_pair_id = qid / hw->nr_vring;
        avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
        hw->vring[qid].last_avail_idx = num;
-       ifc_iowrite16(num, avail_idx_addr);
+       vp_iowrite16(num, avail_idx_addr);
 
        return 0;
 }
@@ -352,41 +340,23 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
 static int ifcvf_hw_enable(struct ifcvf_hw *hw)
 {
        struct virtio_pci_common_cfg __iomem *cfg;
-       struct ifcvf_adapter *ifcvf;
        u32 i;
 
-       ifcvf = vf_to_adapter(hw);
        cfg = hw->common_cfg;
-       ifc_iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config);
-
-       if (ifc_ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) {
-               IFCVF_ERR(ifcvf->pdev, "No msix vector for device config\n");
-               return -EINVAL;
-       }
-
        for (i = 0; i < hw->nr_vring; i++) {
                if (!hw->vring[i].ready)
                        break;
 
-               ifc_iowrite16(i, &cfg->queue_select);
-               ifc_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
+               vp_iowrite16(i, &cfg->queue_select);
+               vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
                                     &cfg->queue_desc_hi);
-               ifc_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
+               vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
                                      &cfg->queue_avail_hi);
-               ifc_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
+               vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
                                     &cfg->queue_used_hi);
-               ifc_iowrite16(hw->vring[i].size, &cfg->queue_size);
-               ifc_iowrite16(i + IFCVF_MSI_QUEUE_OFF, &cfg->queue_msix_vector);
-
-               if (ifc_ioread16(&cfg->queue_msix_vector) ==
-                   VIRTIO_MSI_NO_VECTOR) {
-                       IFCVF_ERR(ifcvf->pdev,
-                                 "No msix vector for queue %u\n", i);
-                       return -EINVAL;
-               }
-
+               vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
                ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
-               ifc_iowrite16(1, &cfg->queue_enable);
+               vp_iowrite16(1, &cfg->queue_enable);
        }
 
        return 0;
@@ -394,18 +364,12 @@ static int ifcvf_hw_enable(struct ifcvf_hw *hw)
 
 static void ifcvf_hw_disable(struct ifcvf_hw *hw)
 {
-       struct virtio_pci_common_cfg __iomem *cfg;
        u32 i;
 
-       cfg = hw->common_cfg;
-       ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->msix_config);
-
+       ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
        for (i = 0; i < hw->nr_vring; i++) {
-               ifc_iowrite16(i, &cfg->queue_select);
-               ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->queue_msix_vector);
+               ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
        }
-
-       ifc_ioread16(&cfg->queue_msix_vector);
 }
 
 int ifcvf_start_hw(struct ifcvf_hw *hw)
@@ -433,5 +397,5 @@ void ifcvf_stop_hw(struct ifcvf_hw *hw)
 
 void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
 {
-       ifc_iowrite16(qid, hw->vring[qid].notify_addr);
+       vp_iowrite16(qid, hw->vring[qid].notify_addr);
 }
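
With the driver-private ifc_io*() wrappers gone, ifcvf uses the shared vp_io*() accessors from <linux/virtio_pci_modern.h> throughout, including the split write for 64-bit queue addresses, since virtio-pci config registers are only 32 bits wide:

	/* Sketch: a 64-bit vring address is written as two 32-bit halves. */
	vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
			     &cfg->queue_desc_hi);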
index c486873f370a8416e2b18fa6e47089c7bb4a64a3..115b61f4924b9444cca93a08e72573400bc48f7c 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/pci.h>
 #include <linux/pci_regs.h>
 #include <linux/vdpa.h>
+#include <linux/virtio_pci_modern.h>
 #include <uapi/linux/virtio_net.h>
 #include <uapi/linux/virtio_blk.h>
 #include <uapi/linux/virtio_config.h>
@@ -27,8 +28,6 @@
 
 #define IFCVF_QUEUE_ALIGNMENT  PAGE_SIZE
 #define IFCVF_QUEUE_MAX                32768
-#define IFCVF_MSI_CONFIG_OFF   0
-#define IFCVF_MSI_QUEUE_OFF    1
 #define IFCVF_PCI_MAX_RESOURCE 6
 
 #define IFCVF_LM_CFG_SIZE              0x40
 #define ifcvf_private_to_vf(adapter) \
        (&((struct ifcvf_adapter *)adapter)->vf)
 
+/* every vq and the config interrupt has its own vector */
+#define MSIX_VECTOR_PER_VQ_AND_CONFIG          1
+/* all vqs share a vector, and config interrupt has a separate vector */
+#define MSIX_VECTOR_SHARED_VQ_AND_CONFIG       2
+/* all vqs and config interrupt share a vector */
+#define MSIX_VECTOR_DEV_SHARED                 3
+
 struct vring_info {
        u64 desc;
        u64 avail;
@@ -60,25 +66,27 @@ struct ifcvf_hw {
        u8 __iomem *isr;
        /* Live migration */
        u8 __iomem *lm_cfg;
-       u16 nr_vring;
        /* Notification bar number */
        u8 notify_bar;
+       u8 msix_vector_status;
+       /* virtio-net or virtio-blk device config size */
+       u32 config_size;
        /* Notification bar address */
        void __iomem *notify_base;
        phys_addr_t notify_base_pa;
        u32 notify_off_multiplier;
+       u32 dev_type;
        u64 req_features;
        u64 hw_features;
-       u32 dev_type;
        struct virtio_pci_common_cfg __iomem *common_cfg;
        void __iomem *dev_cfg;
        struct vring_info vring[IFCVF_MAX_QUEUES];
        void __iomem * const *base;
        char config_msix_name[256];
        struct vdpa_callback config_cb;
-       unsigned int config_irq;
-       /* virtio-net or virtio-blk device config size */
-       u32 config_size;
+       int config_irq;
+       int vqs_reused_irq;
+       u16 nr_vring;
 };
 
 struct ifcvf_adapter {
@@ -123,4 +131,6 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
 struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
 int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
 u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
+u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
+u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
 #endif /* _IFCVF_H_ */
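
The three MSIX_VECTOR_* modes above determine how the vector indices are laid out: per-vq mode dedicates vectors 0..nr_vring-1 to the vqs and vector nr_vring to the config interrupt, shared-vq mode puts all vqs on vector 0 and the config interrupt on vector 1, and dev-shared mode puts everything on vector 0. A minimal userspace sketch of that mapping (illustrative only, not driver code):

#include <stdio.h>

enum msix_mode {
	PER_VQ_AND_CONFIG = 1,     /* every vq and the config interrupt has its own vector */
	SHARED_VQ_AND_CONFIG = 2,  /* vqs share vector 0, config uses vector 1 */
	DEV_SHARED = 3,            /* everything shares vector 0 */
};

/* Return the vector index the config interrupt uses in each mode,
 * mirroring the logic in ifcvf_request_config_irq(). */
static int config_vector(enum msix_mode mode, int nr_vring)
{
	switch (mode) {
	case PER_VQ_AND_CONFIG:
		return nr_vring;   /* vectors 0..nr_vring-1 belong to the vqs */
	case SHARED_VQ_AND_CONFIG:
		return 1;          /* vector 0 is the shared vq vector */
	case DEV_SHARED:
		return 0;          /* one vector for everything */
	}
	return -1;
}

int main(void)
{
	printf("per-vq, 8 vqs -> config on vector %d\n", config_vector(PER_VQ_AND_CONFIG, 8));
	printf("shared vqs    -> config on vector %d\n", config_vector(SHARED_VQ_AND_CONFIG, 8));
	printf("dev shared    -> config on vector %d\n", config_vector(DEV_SHARED, 8));
	return 0;
}
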
index d1a6b5ab543c3d8fb949c0f1ba400a6c06a72b12..4366320fb68d3140c63b8c7cd00e93b14cab1a9e 100644 (file)
@@ -27,7 +27,7 @@ static irqreturn_t ifcvf_config_changed(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
+static irqreturn_t ifcvf_vq_intr_handler(int irq, void *arg)
 {
        struct vring_info *vring = arg;
 
@@ -37,76 +37,324 @@ static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
+static irqreturn_t ifcvf_vqs_reused_intr_handler(int irq, void *arg)
+{
+       struct ifcvf_hw *vf = arg;
+       struct vring_info *vring;
+       int i;
+
+       for (i = 0; i < vf->nr_vring; i++) {
+               vring = &vf->vring[i];
+               if (vring->cb.callback)
+                       vring->cb.callback(vring->cb.private);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t ifcvf_dev_intr_handler(int irq, void *arg)
+{
+       struct ifcvf_hw *vf = arg;
+       u8 isr;
+
+       isr = vp_ioread8(vf->isr);
+       if (isr & VIRTIO_PCI_ISR_CONFIG)
+               ifcvf_config_changed(irq, arg);
+
+       return ifcvf_vqs_reused_intr_handler(irq, arg);
+}
+
 static void ifcvf_free_irq_vectors(void *data)
 {
        pci_free_irq_vectors(data);
 }
 
-static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
+static void ifcvf_free_per_vq_irq(struct ifcvf_adapter *adapter)
 {
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int i;
 
+       for (i = 0; i < vf->nr_vring; i++) {
+               if (vf->vring[i].irq != -EINVAL) {
+                       devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
+                       vf->vring[i].irq = -EINVAL;
+               }
+       }
+}
 
-       for (i = 0; i < queues; i++) {
-               devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
-               vf->vring[i].irq = -EINVAL;
+static void ifcvf_free_vqs_reused_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+
+       if (vf->vqs_reused_irq != -EINVAL) {
+               devm_free_irq(&pdev->dev, vf->vqs_reused_irq, vf);
+               vf->vqs_reused_irq = -EINVAL;
        }
 
-       devm_free_irq(&pdev->dev, vf->config_irq, vf);
+}
+
+static void ifcvf_free_vq_irq(struct ifcvf_adapter *adapter)
+{
+       struct ifcvf_hw *vf = &adapter->vf;
+
+       if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+               ifcvf_free_per_vq_irq(adapter);
+       else
+               ifcvf_free_vqs_reused_irq(adapter);
+}
+
+static void ifcvf_free_config_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+
+       if (vf->config_irq == -EINVAL)
+               return;
+
+       /* If the irq is shared by all vqs and the config interrupt,
+        * it was already freed in ifcvf_free_vq_irq(), so we only need
+        * to free the config irq here when msix_vector_status != MSIX_VECTOR_DEV_SHARED.
+        */
+       if (vf->msix_vector_status != MSIX_VECTOR_DEV_SHARED) {
+               devm_free_irq(&pdev->dev, vf->config_irq, vf);
+               vf->config_irq = -EINVAL;
+       }
+}
+
+static void ifcvf_free_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+
+       ifcvf_free_vq_irq(adapter);
+       ifcvf_free_config_irq(adapter);
        ifcvf_free_irq_vectors(pdev);
 }
 
-static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
+/* ifcvf MSI-X vector allocator: this helper tries to allocate
+ * vectors for all virtqueues and the config interrupt.
+ * It returns the number of allocated vectors on success and a
+ * negative value on failure.
+ */
+static int ifcvf_alloc_vectors(struct ifcvf_adapter *adapter)
 {
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
-       int vector, i, ret, irq;
-       u16 max_intr;
+       int max_intr, ret;
 
        /* all queues and config interrupt  */
        max_intr = vf->nr_vring + 1;
+       ret = pci_alloc_irq_vectors(pdev, 1, max_intr, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
 
-       ret = pci_alloc_irq_vectors(pdev, max_intr,
-                                   max_intr, PCI_IRQ_MSIX);
        if (ret < 0) {
                IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
                return ret;
        }
 
+       if (ret < max_intr)
+               IFCVF_INFO(pdev,
+                          "Requested %u vectors, but only %u were allocated; expect lower performance\n",
+                          max_intr, ret);
+
+       return ret;
+}
+
+static int ifcvf_request_per_vq_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+       int i, vector, ret, irq;
+
+       vf->vqs_reused_irq = -EINVAL;
+       for (i = 0; i < vf->nr_vring; i++) {
+               snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", pci_name(pdev), i);
+               vector = i;
+               irq = pci_irq_vector(pdev, vector);
+               ret = devm_request_irq(&pdev->dev, irq,
+                                      ifcvf_vq_intr_handler, 0,
+                                      vf->vring[i].msix_name,
+                                      &vf->vring[i]);
+               if (ret) {
+                       IFCVF_ERR(pdev, "Failed to request irq for vq %d\n", i);
+                       goto err;
+               }
+
+               vf->vring[i].irq = irq;
+               ret = ifcvf_set_vq_vector(vf, i, vector);
+               if (ret == VIRTIO_MSI_NO_VECTOR) {
+                       IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
+                       goto err;
+               }
+       }
+
+       return 0;
+err:
+       ifcvf_free_irq(adapter);
+
+       return -EFAULT;
+}
+
+static int ifcvf_request_vqs_reused_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+       int i, vector, ret, irq;
+
+       vector = 0;
+       snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-vqs-reused-irq\n", pci_name(pdev));
+       irq = pci_irq_vector(pdev, vector);
+       ret = devm_request_irq(&pdev->dev, irq,
+                              ifcvf_vqs_reused_intr_handler, 0,
+                              vf->vring[0].msix_name, vf);
+       if (ret) {
+               IFCVF_ERR(pdev, "Failed to request reused irq for the device\n");
+               goto err;
+       }
+
+       vf->vqs_reused_irq = irq;
+       for (i = 0; i < vf->nr_vring; i++) {
+               vf->vring[i].irq = -EINVAL;
+               ret = ifcvf_set_vq_vector(vf, i, vector);
+               if (ret == VIRTIO_MSI_NO_VECTOR) {
+                       IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
+                       goto err;
+               }
+       }
+
+       return 0;
+err:
+       ifcvf_free_irq(adapter);
+
+       return -EFAULT;
+}
+
+static int ifcvf_request_dev_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+       int i, vector, ret, irq;
+
+       vector = 0;
+       snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-dev-irq\n", pci_name(pdev));
+       irq = pci_irq_vector(pdev, vector);
+       ret = devm_request_irq(&pdev->dev, irq,
+                              ifcvf_dev_intr_handler, 0,
+                              vf->vring[0].msix_name, vf);
+       if (ret) {
+               IFCVF_ERR(pdev, "Failed to request irq for the device\n");
+               goto err;
+       }
+
+       vf->vqs_reused_irq = irq;
+       for (i = 0; i < vf->nr_vring; i++) {
+               vf->vring[i].irq = -EINVAL;
+               ret = ifcvf_set_vq_vector(vf, i, vector);
+               if (ret == VIRTIO_MSI_NO_VECTOR) {
+                       IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
+                       goto err;
+               }
+       }
+
+       vf->config_irq = irq;
+       ret = ifcvf_set_config_vector(vf, vector);
+       if (ret == VIRTIO_MSI_NO_VECTOR) {
+               IFCVF_ERR(pdev, "No msix vector for device config\n");
+               goto err;
+       }
+
+       return 0;
+err:
+       ifcvf_free_irq(adapter);
+
+       return -EFAULT;
+}
+
+static int ifcvf_request_vq_irq(struct ifcvf_adapter *adapter)
+{
+       struct ifcvf_hw *vf = &adapter->vf;
+       int ret;
+
+       if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+               ret = ifcvf_request_per_vq_irq(adapter);
+       else
+               ret = ifcvf_request_vqs_reused_irq(adapter);
+
+       return ret;
+}
+
+static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+       int config_vector, ret;
+
+       if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
+               return 0;
+
+       if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+               /* vectors 0..nr_vring - 1 are for vqs, vector nr_vring is for the config interrupt */
+               config_vector = vf->nr_vring;
+
+       if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
+               /* vector 0 for vqs and 1 for config interrupt */
+               config_vector = 1;
+
        snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
                 pci_name(pdev));
-       vector = 0;
-       vf->config_irq = pci_irq_vector(pdev, vector);
+       vf->config_irq = pci_irq_vector(pdev, config_vector);
        ret = devm_request_irq(&pdev->dev, vf->config_irq,
                               ifcvf_config_changed, 0,
                               vf->config_msix_name, vf);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request config irq\n");
-               return ret;
+               goto err;
        }
 
-       for (i = 0; i < vf->nr_vring; i++) {
-               snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
-                        pci_name(pdev), i);
-               vector = i + IFCVF_MSI_QUEUE_OFF;
-               irq = pci_irq_vector(pdev, vector);
-               ret = devm_request_irq(&pdev->dev, irq,
-                                      ifcvf_intr_handler, 0,
-                                      vf->vring[i].msix_name,
-                                      &vf->vring[i]);
-               if (ret) {
-                       IFCVF_ERR(pdev,
-                                 "Failed to request irq for vq %d\n", i);
-                       ifcvf_free_irq(adapter, i);
+       ret = ifcvf_set_config_vector(vf, config_vector);
+       if (ret == VIRTIO_MSI_NO_VECTOR) {
+               IFCVF_ERR(pdev, "No msix vector for device config\n");
+               goto err;
+       }
 
-                       return ret;
-               }
+       return 0;
+err:
+       ifcvf_free_irq(adapter);
 
-               vf->vring[i].irq = irq;
+       return -EFAULT;
+}
+
+static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
+{
+       struct ifcvf_hw *vf = &adapter->vf;
+       int nvectors, ret, max_intr;
+
+       nvectors = ifcvf_alloc_vectors(adapter);
+       if (nvectors <= 0)
+               return -EFAULT;
+
+       vf->msix_vector_status = MSIX_VECTOR_PER_VQ_AND_CONFIG;
+       max_intr = vf->nr_vring + 1;
+       if (nvectors < max_intr)
+               vf->msix_vector_status = MSIX_VECTOR_SHARED_VQ_AND_CONFIG;
+
+       if (nvectors == 1) {
+               vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;
+               ret = ifcvf_request_dev_irq(adapter);
+
+               return ret;
        }
 
+       ret = ifcvf_request_vq_irq(adapter);
+       if (ret)
+               return ret;
+
+       ret = ifcvf_request_config_irq(adapter);
+       if (ret)
+               return ret;
+
        return 0;
 }
 
@@ -263,7 +511,7 @@ static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
 
        if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
                ifcvf_stop_datapath(adapter);
-               ifcvf_free_irq(adapter, vf->nr_vring);
+               ifcvf_free_irq(adapter);
        }
 
        ifcvf_reset_vring(adapter);
@@ -348,7 +596,7 @@ static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
 {
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-       return ioread8(&vf->common_cfg->config_generation);
+       return vp_ioread8(&vf->common_cfg->config_generation);
 }
 
 static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
@@ -410,7 +658,10 @@ static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
 {
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-       return vf->vring[qid].irq;
+       if (vf->vqs_reused_irq < 0)
+               return vf->vring[qid].irq;
+       else
+               return -EINVAL;
 }
 
 static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
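
ifcvf_request_irq() above degrades gracefully: it asks pci_alloc_irq_vectors() for nr_vring + 1 vectors and then picks the sharing mode from however many the PCI core actually granted. A compact model of that policy (a userspace sketch, not driver code):

#include <stdio.h>

#define MSIX_VECTOR_PER_VQ_AND_CONFIG		1
#define MSIX_VECTOR_SHARED_VQ_AND_CONFIG	2
#define MSIX_VECTOR_DEV_SHARED			3

/* Map the number of granted MSI-X vectors to a sharing mode, the same
 * way ifcvf_request_irq() does. */
static int pick_mode(int nvectors, int nr_vring)
{
	if (nvectors >= nr_vring + 1)
		return MSIX_VECTOR_PER_VQ_AND_CONFIG;
	if (nvectors > 1)
		return MSIX_VECTOR_SHARED_VQ_AND_CONFIG;
	return MSIX_VECTOR_DEV_SHARED;	/* nvectors == 1 */
}

int main(void)
{
	int nr_vring = 8, n;

	for (n = 1; n <= nr_vring + 1; n++)
		printf("%d vector(s) -> mode %d\n", n, pick_mode(n, nr_vring));
	return 0;
}
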
index d0f91078600e9a5fa09b79d408c6f4359436dc31..79001301b383218b990008714e8613de4903018a 100644 (file)
@@ -163,6 +163,7 @@ struct mlx5_vdpa_net {
        u32 cur_num_vqs;
        struct notifier_block nb;
        struct vdpa_callback config_cb;
+       struct mlx5_vdpa_wq_ent cvq_ent;
 };
 
 static void free_resources(struct mlx5_vdpa_net *ndev);
@@ -1475,7 +1476,7 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
        virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
        struct mlx5_core_dev *pfmdev;
        size_t read;
-       u8 mac[ETH_ALEN];
+       u8 mac[ETH_ALEN], mac_back[ETH_ALEN];
 
        pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
        switch (cmd) {
@@ -1489,6 +1490,9 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
                        break;
                }
 
+               if (is_zero_ether_addr(mac))
+                       break;
+
                if (!is_zero_ether_addr(ndev->config.mac)) {
                        if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) {
                                mlx5_vdpa_warn(mvdev, "failed to delete old MAC %pM from MPFS table\n",
@@ -1503,7 +1507,47 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
                        break;
                }
 
+               /* Back up the original MAC address so that we can
+                * restore it if adding the forwarding rules fails.
+                */
+               memcpy(mac_back, ndev->config.mac, ETH_ALEN);
+
                memcpy(ndev->config.mac, mac, ETH_ALEN);
+
+               /* Recreate the flow table entry so that packets can be
+                * forwarded back.
+                */
+               remove_fwd_to_tir(ndev);
+
+               if (add_fwd_to_tir(ndev)) {
+                       mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n");
+
+                       /* This path is unlikely, but double-check anyway. */
+                       if (is_zero_ether_addr(mac_back)) {
+                               mlx5_vdpa_warn(mvdev, "restore mac failed: Original MAC is zero\n");
+                               break;
+                       }
+
+                       /* Try to restore the original MAC address in the
+                        * MPFS table and to restore the forwarding rule entry.
+                        */
+                       if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) {
+                               mlx5_vdpa_warn(mvdev, "restore mac failed: delete MAC %pM from MPFS table failed\n",
+                                              ndev->config.mac);
+                       }
+
+                       if (mlx5_mpfs_add_mac(pfmdev, mac_back)) {
+                               mlx5_vdpa_warn(mvdev, "restore mac failed: insert old MAC %pM into MPFS table failed\n",
+                                              mac_back);
+                       }
+
+                       memcpy(ndev->config.mac, mac_back, ETH_ALEN);
+
+                       if (add_fwd_to_tir(ndev))
+                               mlx5_vdpa_warn(mvdev, "restore forward rules failed: insert forward rules failed\n");
+
+                       break;
+               }
+
                status = VIRTIO_NET_OK;
                break;
 
@@ -1615,6 +1659,12 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
        mvdev = wqent->mvdev;
        ndev = to_mlx5_vdpa_ndev(mvdev);
        cvq = &mvdev->cvq;
+
+       mutex_lock(&ndev->reslock);
+
+       if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
+               goto out;
+
        if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
                goto out;
 
@@ -1653,9 +1703,13 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
 
                if (vringh_need_notify_iotlb(&cvq->vring))
                        vringh_notify(&cvq->vring);
+
+               queue_work(mvdev->wq, &wqent->work);
+               break;
        }
+
 out:
-       kfree(wqent);
+       mutex_unlock(&ndev->reslock);
 }
 
 static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
@@ -1663,22 +1717,15 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
        struct mlx5_vdpa_virtqueue *mvq;
-       struct mlx5_vdpa_wq_ent *wqent;
 
        if (!is_index_valid(mvdev, idx))
                return;
 
        if (unlikely(is_ctrl_vq_idx(mvdev, idx))) {
-               if (!mvdev->cvq.ready)
-                       return;
-
-               wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
-               if (!wqent)
+               if (!mvdev->wq || !mvdev->cvq.ready)
                        return;
 
-               wqent->mvdev = mvdev;
-               INIT_WORK(&wqent->work, mlx5_cvq_kick_handler);
-               queue_work(mvdev->wq, &wqent->work);
+               queue_work(mvdev->wq, &ndev->cvq_ent.work);
                return;
        }
 
@@ -2137,7 +2184,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
                goto err_mr;
 
        if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
-               return 0;
+               goto err_mr;
 
        restore_channels_info(ndev);
        err = setup_driver(mvdev);
@@ -2152,12 +2199,14 @@ err_mr:
        return err;
 }
 
+/* reslock must be held for this function */
 static int setup_driver(struct mlx5_vdpa_dev *mvdev)
 {
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
        int err;
 
-       mutex_lock(&ndev->reslock);
+       WARN_ON(!mutex_is_locked(&ndev->reslock));
+
        if (ndev->setup) {
                mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
                err = 0;
@@ -2187,7 +2236,6 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
                goto err_fwd;
        }
        ndev->setup = true;
-       mutex_unlock(&ndev->reslock);
 
        return 0;
 
@@ -2198,23 +2246,23 @@ err_tir:
 err_rqt:
        teardown_virtqueues(ndev);
 out:
-       mutex_unlock(&ndev->reslock);
        return err;
 }
 
+/* reslock must be held for this function */
 static void teardown_driver(struct mlx5_vdpa_net *ndev)
 {
-       mutex_lock(&ndev->reslock);
+       WARN_ON(!mutex_is_locked(&ndev->reslock));
+
        if (!ndev->setup)
-               goto out;
+               return;
 
        remove_fwd_to_tir(ndev);
        destroy_tir(ndev);
        destroy_rqt(ndev);
        teardown_virtqueues(ndev);
        ndev->setup = false;
-out:
-       mutex_unlock(&ndev->reslock);
 }
 
 static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
@@ -2235,6 +2283,8 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
 
        print_status(mvdev, status, true);
 
+       mutex_lock(&ndev->reslock);
+
        if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
                if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
                        err = setup_driver(mvdev);
@@ -2244,16 +2294,19 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
                        }
                } else {
                        mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
-                       return;
+                       goto err_clear;
                }
        }
 
        ndev->mvdev.status = status;
+       mutex_unlock(&ndev->reslock);
        return;
 
 err_setup:
        mlx5_vdpa_destroy_mr(&ndev->mvdev);
        ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
+err_clear:
+       mutex_unlock(&ndev->reslock);
 }
 
 static int mlx5_vdpa_reset(struct vdpa_device *vdev)
@@ -2263,6 +2316,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
 
        print_status(mvdev, 0, true);
        mlx5_vdpa_info(mvdev, "performing device reset\n");
+
+       mutex_lock(&ndev->reslock);
        teardown_driver(ndev);
        clear_vqs_ready(ndev);
        mlx5_vdpa_destroy_mr(&ndev->mvdev);
@@ -2275,6 +2330,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
                if (mlx5_vdpa_create_mr(mvdev, NULL))
                        mlx5_vdpa_warn(mvdev, "create MR failed\n");
        }
+       mutex_unlock(&ndev->reslock);
 
        return 0;
 }
@@ -2310,19 +2366,24 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
 static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
 {
        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
        bool change_map;
        int err;
 
+       mutex_lock(&ndev->reslock);
+
        err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
        if (err) {
                mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
-               return err;
+               goto err;
        }
 
        if (change_map)
-               return mlx5_vdpa_change_map(mvdev, iotlb);
+               err = mlx5_vdpa_change_map(mvdev, iotlb);
 
-       return 0;
+err:
+       mutex_unlock(&ndev->reslock);
+       return err;
 }
 
 static void mlx5_vdpa_free(struct vdpa_device *vdev)
@@ -2565,6 +2626,28 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
        return ret;
 }
 
+static int config_func_mtu(struct mlx5_core_dev *mdev, u16 mtu)
+{
+       int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+       void *in;
+       int err;
+
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
+       MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu,
+                mtu + MLX5V_ETH_HARD_MTU);
+       MLX5_SET(modify_nic_vport_context_in, in, opcode,
+                MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
+
+       err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
+
+       kvfree(in);
+       return err;
+}
+
 static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
                             const struct vdpa_dev_set_config *add_config)
 {
@@ -2624,6 +2707,13 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
        init_mvqs(ndev);
        mutex_init(&ndev->reslock);
        config = &ndev->config;
+
+       if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)) {
+               err = config_func_mtu(mdev, add_config->net.mtu);
+               if (err)
+                       goto err_mtu;
+       }
+
        err = query_mtu(mdev, &mtu);
        if (err)
                goto err_mtu;
@@ -2668,6 +2758,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
        if (err)
                goto err_mr;
 
+       ndev->cvq_ent.mvdev = mvdev;
+       INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler);
        mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
        if (!mvdev->wq) {
                err = -ENOMEM;
@@ -2707,9 +2799,12 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
        struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
        struct mlx5_vdpa_dev *mvdev = to_mvdev(dev);
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       struct workqueue_struct *wq;
 
        mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
-       destroy_workqueue(mvdev->wq);
+       wq = mvdev->wq;
+       mvdev->wq = NULL;
+       destroy_workqueue(wq);
        _vdpa_unregister_device(dev);
        mgtdev->ndev = NULL;
 }
@@ -2741,7 +2836,8 @@ static int mlx5v_probe(struct auxiliary_device *adev,
        mgtdev->mgtdev.device = mdev->device;
        mgtdev->mgtdev.id_table = id_table;
        mgtdev->mgtdev.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) |
-                                         BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
+                                         BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP) |
+                                         BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
        mgtdev->mgtdev.max_supported_vqs =
                MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues) + 1;
        mgtdev->mgtdev.supported_features = get_supported_features(mdev);
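
The handle_ctrl_mac() change above follows a classic save/attempt/rollback shape: snapshot the old MAC, install the new one, and if re-adding the forwarding rules fails, undo the MPFS and config updates best-effort. A stripped-down userspace model of the same pattern, where set_rules() is a hypothetical stand-in for the MPFS/TIR calls:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static unsigned char cfg_mac[ETH_ALEN];

/* Hypothetical stand-in for mlx5_mpfs_add_mac()/add_fwd_to_tir(). */
static bool set_rules(const unsigned char *mac)
{
	(void)mac;
	return false;	/* pretend the rule insertion failed */
}

static int change_mac(const unsigned char *new_mac)
{
	unsigned char mac_back[ETH_ALEN];

	memcpy(mac_back, cfg_mac, ETH_ALEN);	/* save for rollback */
	memcpy(cfg_mac, new_mac, ETH_ALEN);

	if (!set_rules(cfg_mac)) {
		/* Best-effort rollback: restore the old MAC and retry the
		 * rules with it; failures here are only logged. */
		memcpy(cfg_mac, mac_back, ETH_ALEN);
		if (!set_rules(cfg_mac))
			fprintf(stderr, "rollback of rules failed too\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	const unsigned char mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 1 };

	return change_mac(mac) ? 1 : 0;
}
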
index 1ea525433a5ca17781c2f8180a2d6ae6c77842ce..2b75c00b10054d2242b6b7b51c7bc07bc461dd8e 100644 (file)
@@ -232,7 +232,7 @@ static int vdpa_name_match(struct device *dev, const void *data)
        return (strcmp(dev_name(&vdev->dev), data) == 0);
 }
 
-static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)
+static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
 {
        struct device *dev;
 
@@ -257,7 +257,7 @@ static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)
  *
  * Return: Returns an error when fail to add device to vDPA bus
  */
-int _vdpa_register_device(struct vdpa_device *vdev, int nvqs)
+int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
 {
        if (!vdev->mdev)
                return -EINVAL;
@@ -274,7 +274,7 @@ EXPORT_SYMBOL_GPL(_vdpa_register_device);
  *
  * Return: Returns an error when fail to add to vDPA bus
  */
-int vdpa_register_device(struct vdpa_device *vdev, int nvqs)
+int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
 {
        int err;
 
index b7bb16f92ac6282a9965a5aa3b6709b4e382c999..06b6f3594a1316e020dc294a8ac102d009504e31 100644 (file)
@@ -36,6 +36,10 @@ static bool nointxmask;
 static bool disable_vga;
 static bool disable_idle_d3;
 
+/* List of PFs that vfio_pci_core_sriov_configure() has been called on */
+static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex);
+static LIST_HEAD(vfio_pci_sriov_pfs);
+
 static inline bool vfio_vga_disabled(void)
 {
 #ifdef CONFIG_VFIO_PCI_VGA
@@ -434,47 +438,17 @@ out:
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_disable);
 
-static struct vfio_pci_core_device *get_pf_vdev(struct vfio_pci_core_device *vdev)
-{
-       struct pci_dev *physfn = pci_physfn(vdev->pdev);
-       struct vfio_device *pf_dev;
-
-       if (!vdev->pdev->is_virtfn)
-               return NULL;
-
-       pf_dev = vfio_device_get_from_dev(&physfn->dev);
-       if (!pf_dev)
-               return NULL;
-
-       if (pci_dev_driver(physfn) != pci_dev_driver(vdev->pdev)) {
-               vfio_device_put(pf_dev);
-               return NULL;
-       }
-
-       return container_of(pf_dev, struct vfio_pci_core_device, vdev);
-}
-
-static void vfio_pci_vf_token_user_add(struct vfio_pci_core_device *vdev, int val)
-{
-       struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev);
-
-       if (!pf_vdev)
-               return;
-
-       mutex_lock(&pf_vdev->vf_token->lock);
-       pf_vdev->vf_token->users += val;
-       WARN_ON(pf_vdev->vf_token->users < 0);
-       mutex_unlock(&pf_vdev->vf_token->lock);
-
-       vfio_device_put(&pf_vdev->vdev);
-}
-
 void vfio_pci_core_close_device(struct vfio_device *core_vdev)
 {
        struct vfio_pci_core_device *vdev =
                container_of(core_vdev, struct vfio_pci_core_device, vdev);
 
-       vfio_pci_vf_token_user_add(vdev, -1);
+       if (vdev->sriov_pf_core_dev) {
+               mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
+               WARN_ON(!vdev->sriov_pf_core_dev->vf_token->users);
+               vdev->sriov_pf_core_dev->vf_token->users--;
+               mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
+       }
        vfio_spapr_pci_eeh_release(vdev->pdev);
        vfio_pci_core_disable(vdev);
 
@@ -495,7 +469,12 @@ void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev)
 {
        vfio_pci_probe_mmaps(vdev);
        vfio_spapr_pci_eeh_open(vdev->pdev);
-       vfio_pci_vf_token_user_add(vdev, 1);
+
+       if (vdev->sriov_pf_core_dev) {
+               mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
+               vdev->sriov_pf_core_dev->vf_token->users++;
+               mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
+       }
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable);
 
@@ -1583,11 +1562,8 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
         *
         * If the VF token is provided but unused, an error is generated.
         */
-       if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token)
-               return 0; /* No VF token provided or required */
-
        if (vdev->pdev->is_virtfn) {
-               struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev);
+               struct vfio_pci_core_device *pf_vdev = vdev->sriov_pf_core_dev;
                bool match;
 
                if (!pf_vdev) {
@@ -1600,7 +1576,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
                }
 
                if (!vf_token) {
-                       vfio_device_put(&pf_vdev->vdev);
                        pci_info_ratelimited(vdev->pdev,
                                "VF token required to access device\n");
                        return -EACCES;
@@ -1610,8 +1585,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
                match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
                mutex_unlock(&pf_vdev->vf_token->lock);
 
-               vfio_device_put(&pf_vdev->vdev);
-
                if (!match) {
                        pci_info_ratelimited(vdev->pdev,
                                "Incorrect VF token provided for device\n");
@@ -1732,8 +1705,30 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb,
 static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev)
 {
        struct pci_dev *pdev = vdev->pdev;
+       struct vfio_pci_core_device *cur;
+       struct pci_dev *physfn;
        int ret;
 
+       if (pdev->is_virtfn) {
+               /*
+                * If this VF was created by our vfio_pci_core_sriov_configure()
+                * then we can find the PF vfio_pci_core_device now, and due to
+                * the locking in pci_disable_sriov() it cannot change until
+                * this VF device driver is removed.
+                */
+               physfn = pci_physfn(vdev->pdev);
+               mutex_lock(&vfio_pci_sriov_pfs_mutex);
+               list_for_each_entry(cur, &vfio_pci_sriov_pfs, sriov_pfs_item) {
+                       if (cur->pdev == physfn) {
+                               vdev->sriov_pf_core_dev = cur;
+                               break;
+                       }
+               }
+               mutex_unlock(&vfio_pci_sriov_pfs_mutex);
+               return 0;
+       }
+
+       /* Not an SR-IOV PF */
        if (!pdev->is_physfn)
                return 0;
 
@@ -1805,6 +1800,7 @@ void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
        INIT_LIST_HEAD(&vdev->ioeventfds_list);
        mutex_init(&vdev->vma_lock);
        INIT_LIST_HEAD(&vdev->vma_list);
+       INIT_LIST_HEAD(&vdev->sriov_pfs_item);
        init_rwsem(&vdev->memory_lock);
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_init_device);
@@ -1896,7 +1892,7 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
 {
        struct pci_dev *pdev = vdev->pdev;
 
-       pci_disable_sriov(pdev);
+       vfio_pci_core_sriov_configure(pdev, 0);
 
        vfio_unregister_group_dev(&vdev->vdev);
 
@@ -1935,21 +1931,49 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected);
 
 int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
 {
+       struct vfio_pci_core_device *vdev;
        struct vfio_device *device;
        int ret = 0;
 
+       device_lock_assert(&pdev->dev);
+
        device = vfio_device_get_from_dev(&pdev->dev);
        if (!device)
                return -ENODEV;
 
-       if (nr_virtfn == 0)
-               pci_disable_sriov(pdev);
-       else
+       vdev = container_of(device, struct vfio_pci_core_device, vdev);
+
+       if (nr_virtfn) {
+               mutex_lock(&vfio_pci_sriov_pfs_mutex);
+               /*
+                * The thread that adds the vdev to the list is the only thread
+                * that gets to call pci_enable_sriov(), and we only allow it
+                * to be called once without going through
+                * pci_disable_sriov().
+                */
+               if (!list_empty(&vdev->sriov_pfs_item)) {
+                       ret = -EINVAL;
+                       goto out_unlock;
+               }
+               list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs);
+               mutex_unlock(&vfio_pci_sriov_pfs_mutex);
                ret = pci_enable_sriov(pdev, nr_virtfn);
+               if (ret)
+                       goto out_del;
+               ret = nr_virtfn;
+               goto out_put;
+       }
 
-       vfio_device_put(device);
+       pci_disable_sriov(pdev);
 
-       return ret < 0 ? ret : nr_virtfn;
+out_del:
+       mutex_lock(&vfio_pci_sriov_pfs_mutex);
+       list_del_init(&vdev->sriov_pfs_item);
+out_unlock:
+       mutex_unlock(&vfio_pci_sriov_pfs_mutex);
+out_put:
+       vfio_device_put(device);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);
 
index 40b098320b2a7242a33acb3b0215ed6aab4f41e4..5829cf2d0552d65c960faead923a3723fbf5b400 100644 (file)
@@ -62,8 +62,12 @@ int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb,
         */
        if (start == 0 && last == ULONG_MAX) {
                u64 mid = last / 2;
+               int err = vhost_iotlb_add_range_ctx(iotlb, start, mid, addr,
+                               perm, opaque);
+
+               if (err)
+                       return err;
 
-               vhost_iotlb_add_range_ctx(iotlb, start, mid, addr, perm, opaque);
                addr += mid + 1;
                start = mid + 1;
        }
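
The vhost_iotlb hunk above completes an overflow workaround: a [0, ULONG_MAX] range is inserted as two halves because "last - start + 1" would wrap to 0, and the first half's return code was previously thrown away. The corrected control flow, reduced to its essentials (add_range() is a stand-in for the real insertion step):

#include <limits.h>
#include <stdio.h>

/* Stand-in for vhost_iotlb_add_range_ctx()'s real insertion step. */
static int add_range(unsigned long start, unsigned long last)
{
	printf("insert [%lu, %lu]\n", start, last);
	return 0;
}

static int add_full_range(void)
{
	unsigned long start = 0, last = ULONG_MAX;
	unsigned long mid = last / 2;
	int err;

	/* Split so that "size = last - start + 1" never overflows. */
	err = add_range(start, mid);
	if (err)
		return err;	/* this error used to be dropped */
	return add_range(mid + 1, last);
}

int main(void)
{
	return add_full_range();
}
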
index ec5249e8c32d9d31efd707163632d8253200aa59..4c2f0bd062856acca718bc988763159ff2ee094b 100644 (file)
@@ -42,7 +42,7 @@ struct vhost_vdpa {
        struct device dev;
        struct cdev cdev;
        atomic_t opened;
-       int nvqs;
+       u32 nvqs;
        int virtio_id;
        int minor;
        struct eventfd_ctx *config_ctx;
@@ -97,8 +97,11 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
                return;
 
        irq = ops->get_vq_irq(vdpa, qid);
+       if (irq < 0)
+               return;
+
        irq_bypass_unregister_producer(&vq->call_ctx.producer);
-       if (!vq->call_ctx.ctx || irq < 0)
+       if (!vq->call_ctx.ctx)
                return;
 
        vq->call_ctx.producer.token = vq->call_ctx.ctx;
@@ -158,7 +161,8 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        u8 status, status_old;
-       int ret, nvqs = v->nvqs;
+       u32 nvqs = v->nvqs;
+       int ret;
        u16 i;
 
        if (copy_from_user(&status, statusp, sizeof(status)))
@@ -355,6 +359,30 @@ static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
        return 0;
 }
 
+static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp)
+{
+       struct vdpa_device *vdpa = v->vdpa;
+       const struct vdpa_config_ops *ops = vdpa->config;
+       u32 size;
+
+       size = ops->get_config_size(vdpa);
+
+       if (copy_to_user(argp, &size, sizeof(size)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
+{
+       struct vdpa_device *vdpa = v->vdpa;
+
+       if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs)))
+               return -EFAULT;
+
+       return 0;
+}
+
 static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                                   void __user *argp)
 {
@@ -492,6 +520,12 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
        case VHOST_VDPA_GET_IOVA_RANGE:
                r = vhost_vdpa_get_iova_range(v, argp);
                break;
+       case VHOST_VDPA_GET_CONFIG_SIZE:
+               r = vhost_vdpa_get_config_size(v, argp);
+               break;
+       case VHOST_VDPA_GET_VQS_COUNT:
+               r = vhost_vdpa_get_vqs_count(v, argp);
+               break;
        default:
                r = vhost_dev_ioctl(&v->vdev, cmd, argp);
                if (r == -ENOIOCTLCMD)
@@ -948,7 +982,8 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
        struct vhost_vdpa *v;
        struct vhost_dev *dev;
        struct vhost_virtqueue **vqs;
-       int nvqs, i, r, opened;
+       int r, opened;
+       u32 i, nvqs;
 
        v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);
 
@@ -1001,7 +1036,7 @@ err:
 
 static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
 {
-       int i;
+       u32 i;
 
        for (i = 0; i < v->nvqs; i++)
                vhost_vdpa_unsetup_vq_irq(v, i);
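
The two new ioctls let userspace size its config buffer and enumerate the device's queues before setting anything up. A possible caller (the device node path is just an example, and this assumes a kernel whose uAPI headers carry these ioctls):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

int main(void)
{
	unsigned int cfg_size, nvqs;
	int fd = open("/dev/vhost-vdpa-0", O_RDWR);	/* example node */

	if (fd < 0)
		return 1;
	if (!ioctl(fd, VHOST_VDPA_GET_CONFIG_SIZE, &cfg_size))
		printf("device config size: %u bytes\n", cfg_size);
	if (!ioctl(fd, VHOST_VDPA_GET_VQS_COUNT, &nvqs))
		printf("virtqueue count: %u\n", nvqs);
	close(fd);
	return 0;
}
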
index 1768362115c6bb4f81e8faaca2ee713f21c67711..d02173fb290c39146634e0d08e14ea123ae174f5 100644 (file)
@@ -2550,8 +2550,9 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
                       &vq->avail->idx, r);
                return false;
        }
+       vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
 
-       return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
+       return vq->avail_idx != vq->last_avail_idx;
 }
 EXPORT_SYMBOL_GPL(vhost_enable_notify);
 
index 34d6bb1bf82eed3717eb78a3997e95408e2eb4bd..a6bb0e4382167e121c49bfeb0445197eab5ecea4 100644 (file)
@@ -1579,7 +1579,14 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
                         * If it's not a platform device, at least print a warning. A
                         * fix would add code to remove the device from the system.
                         */
-                       if (dev_is_platform(device)) {
+                       if (!device) {
+                               /* TODO: Represent each OF framebuffer as its own
+                                * device in the device hierarchy. For now, offb
+                                * doesn't have such a device, so unregister the
+                                * framebuffer as before without warning.
+                                */
+                               do_unregister_framebuffer(registered_fb[i]);
+                       } else if (dev_is_platform(device)) {
                                registered_fb[i]->forced_out = true;
                                platform_device_unregister(to_platform_device(device));
                        } else {
index 0ae1a39f2e28134845c1f5ad957b51753c58c7d7..a1c467a0e9f719665fc02fa559d5c94545e5725f 100644 (file)
@@ -78,6 +78,7 @@ static void vmgenid_notify(struct acpi_device *device, u32 event)
 }
 
 static const struct acpi_device_id vmgenid_ids[] = {
+       { "VMGENCTR", 0 },
        { "VM_GEN_COUNTER", 0 },
        { }
 };
index 492fc26f0b65073bb39b54f2c649605c73cca3e1..b5adf6abd241e8c254215449052cb73c18dd07a6 100644 (file)
@@ -105,7 +105,7 @@ config VIRTIO_BALLOON
 
 config VIRTIO_MEM
        tristate "Virtio mem driver"
-       depends on X86_64
+       depends on X86_64 || ARM64
        depends on VIRTIO
        depends on MEMORY_HOTPLUG
        depends on MEMORY_HOTREMOVE
@@ -115,8 +115,9 @@ config VIRTIO_MEM
         This driver provides access to virtio-mem paravirtualized memory
         devices, allowing to hotplug and hotunplug memory.
 
-        This driver was only tested under x86-64, but should theoretically
-        work on all architectures that support memory hotplug and hotremove.
+        This driver was only tested under x86-64 and arm64, but should
+        theoretically work on all architectures that support memory hotplug
+        and hotremove.
 
         If unsure, say M.
 
index fdbde1db5ec59f7cee313871d7ed5917d0798d20..d724f676608ba34a973803eb8643bf3bba604262 100644 (file)
@@ -24,46 +24,17 @@ MODULE_PARM_DESC(force_legacy,
                 "Force legacy mode for transitional virtio 1 devices");
 #endif
 
-/* disable irq handlers */
-void vp_disable_cbs(struct virtio_device *vdev)
+/* wait for pending irq handlers */
+void vp_synchronize_vectors(struct virtio_device *vdev)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i;
 
-       if (vp_dev->intx_enabled) {
-               /*
-                * The below synchronize() guarantees that any
-                * interrupt for this line arriving after
-                * synchronize_irq() has completed is guaranteed to see
-                * intx_soft_enabled == false.
-                */
-               WRITE_ONCE(vp_dev->intx_soft_enabled, false);
+       if (vp_dev->intx_enabled)
                synchronize_irq(vp_dev->pci_dev->irq);
-       }
-
-       for (i = 0; i < vp_dev->msix_vectors; ++i)
-               disable_irq(pci_irq_vector(vp_dev->pci_dev, i));
-}
-
-/* enable irq handlers */
-void vp_enable_cbs(struct virtio_device *vdev)
-{
-       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-       int i;
-
-       if (vp_dev->intx_enabled) {
-               disable_irq(vp_dev->pci_dev->irq);
-               /*
-                * The above disable_irq() provides TSO ordering and
-                * as such promotes the below store to store-release.
-                */
-               WRITE_ONCE(vp_dev->intx_soft_enabled, true);
-               enable_irq(vp_dev->pci_dev->irq);
-               return;
-       }
 
        for (i = 0; i < vp_dev->msix_vectors; ++i)
-               enable_irq(pci_irq_vector(vp_dev->pci_dev, i));
+               synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
 }
 
 /* the notify function used when creating a virt queue */
@@ -113,9 +84,6 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
        struct virtio_pci_device *vp_dev = opaque;
        u8 isr;
 
-       if (!READ_ONCE(vp_dev->intx_soft_enabled))
-               return IRQ_NONE;
-
        /* reading the ISR has the effect of also clearing it so it's very
         * important to save off the value. */
        isr = ioread8(vp_dev->isr);
@@ -173,8 +141,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
        snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
                 "%s-config", name);
        err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
-                         vp_config_changed, IRQF_NO_AUTOEN,
-                         vp_dev->msix_names[v],
+                         vp_config_changed, 0, vp_dev->msix_names[v],
                          vp_dev);
        if (err)
                goto error;
@@ -193,8 +160,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
                snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
                         "%s-virtqueues", name);
                err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
-                                 vp_vring_interrupt, IRQF_NO_AUTOEN,
-                                 vp_dev->msix_names[v],
+                                 vp_vring_interrupt, 0, vp_dev->msix_names[v],
                                  vp_dev);
                if (err)
                        goto error;
@@ -371,7 +337,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
                         "%s-%s",
                         dev_name(&vp_dev->vdev.dev), names[i]);
                err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
-                                 vring_interrupt, IRQF_NO_AUTOEN,
+                                 vring_interrupt, 0,
                                  vp_dev->msix_names[msix_vec],
                                  vqs[i]);
                if (err)
index 23f6c5c678d5e3e411726f2ebe1d3a009e4f990c..eb17a29fc7ef1f6cbc3c93a390d03a80f0076fc6 100644 (file)
@@ -63,7 +63,6 @@ struct virtio_pci_device {
        /* MSI-X support */
        int msix_enabled;
        int intx_enabled;
-       bool intx_soft_enabled;
        cpumask_var_t *msix_affinity_masks;
        /* Name strings for interrupts. This size should be enough,
         * and I'm too lazy to allocate each name separately. */
@@ -102,10 +101,8 @@ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
        return container_of(vdev, struct virtio_pci_device, vdev);
 }
 
-/* disable irq handlers */
-void vp_disable_cbs(struct virtio_device *vdev);
-/* enable irq handlers */
-void vp_enable_cbs(struct virtio_device *vdev);
+/* wait for pending irq handlers */
+void vp_synchronize_vectors(struct virtio_device *vdev);
 /* the notify function used when creating a virt queue */
 bool vp_notify(struct virtqueue *vq);
 /* the config->del_vqs() implementation */
index 34141b9abe278bad93dcaa676c025c61752445c6..6f4e34ce96b819049375c7e45c5e40fc22857be2 100644 (file)
@@ -98,8 +98,8 @@ static void vp_reset(struct virtio_device *vdev)
        /* Flush out the status write, and flush in device writes,
         * including MSI-X interrupts, if any. */
        vp_legacy_get_status(&vp_dev->ldev);
-       /* Disable VQ/configuration callbacks. */
-       vp_disable_cbs(vdev);
+       /* Flush pending VQ/configuration callbacks. */
+       vp_synchronize_vectors(vdev);
 }
 
 static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
@@ -185,7 +185,6 @@ static void del_vq(struct virtio_pci_vq_info *info)
 }
 
 static const struct virtio_config_ops virtio_pci_config_ops = {
-       .enable_cbs     = vp_enable_cbs,
        .get            = vp_get,
        .set            = vp_set,
        .get_status     = vp_get_status,
index 5455bc041fb6916bf265d45565905668fbdf9cf3..a2671a20ef779f9cec805ae53438890911c4dea1 100644 (file)
@@ -172,8 +172,8 @@ static void vp_reset(struct virtio_device *vdev)
         */
        while (vp_modern_get_status(mdev))
                msleep(1);
-       /* Disable VQ/configuration callbacks. */
-       vp_disable_cbs(vdev);
+       /* Flush pending VQ/configuration callbacks. */
+       vp_synchronize_vectors(vdev);
 }
 
 static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
@@ -293,7 +293,7 @@ static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
 
        for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); pos > 0;
             pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
-               u8 type, cap_len, id;
+               u8 type, cap_len, id, res_bar;
                u32 tmp32;
                u64 res_offset, res_length;
 
@@ -315,9 +315,14 @@ static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
                if (id != required_id)
                        continue;
 
-               /* Type, and ID match, looks good */
                pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
-                                                        bar), bar);
+                                                        bar), &res_bar);
+               if (res_bar >= PCI_STD_NUM_BARS)
+                       continue;
+
+               /* Type and ID match, and the BAR value isn't reserved.
+                * Looks good.
+                */
 
                /* Read the lower 32bit of length and offset */
                pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
@@ -337,6 +342,7 @@ static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
                                                     length_hi), &tmp32);
                res_length |= ((u64)tmp32) << 32;
 
+               *bar = res_bar;
                *offset = res_offset;
                *len = res_length;
 
@@ -380,7 +386,6 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
 }
 
 static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
-       .enable_cbs     = vp_enable_cbs,
        .get            = NULL,
        .set            = NULL,
        .generation     = vp_generation,
@@ -398,7 +403,6 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
 };
 
 static const struct virtio_config_ops virtio_pci_config_ops = {
-       .enable_cbs     = vp_enable_cbs,
        .get            = vp_get,
        .set            = vp_set,
        .generation     = vp_generation,
index e8b3ff2b9fbc223af9cf0a8607d364a2fb9a44f4..591738ad3d565bc4d26b053d7cc2b42b70eff0cb 100644 (file)
@@ -35,6 +35,13 @@ vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
        pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
                              &length);
 
+       /* Check if the BAR may have changed since we requested the region. */
+       if (bar >= PCI_STD_NUM_BARS || !(mdev->modern_bars & (1 << bar))) {
+               dev_err(&dev->dev,
+                       "virtio_pci: bar unexpectedly changed to %u\n", bar);
+               return NULL;
+       }
+
        if (length <= start) {
                dev_err(&dev->dev,
                        "virtio_pci: bad capability len %u (>%u expected)\n",
@@ -120,7 +127,7 @@ static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
                                     &bar);
 
                /* Ignore structures with reserved BAR values */
-               if (bar > 0x5)
+               if (bar >= PCI_STD_NUM_BARS)
                        continue;
 
                if (type == cfg_type) {
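
Both BAR hunks converge on the same rule: a virtio PCI capability may only name one of the six standard BARs (0..5); any larger value is reserved and must never be used to index BAR arrays. The check in isolation, with PCI_STD_NUM_BARS spelled out:

#include <stdbool.h>
#include <stdio.h>

#define PCI_STD_NUM_BARS 6	/* standard BARs 0..5 */

static bool virtio_cap_bar_is_valid(unsigned char bar)
{
	return bar < PCI_STD_NUM_BARS;
}

int main(void)
{
	printf("bar 5: %s\n", virtio_cap_bar_is_valid(5) ? "ok" : "reserved");
	printf("bar 6: %s\n", virtio_cap_bar_is_valid(6) ? "ok" : "reserved");
	return 0;
}
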
index 962f1477b1fabd9fbed81696956cc7e978ced582..cfb028ca238ebf1fbcdc5b11b5870aa78956361a 100644 (file)
@@ -379,19 +379,11 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
 
        flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
 
-       if (flags & VRING_DESC_F_INDIRECT) {
-               dma_unmap_single(vring_dma_dev(vq),
-                                virtio64_to_cpu(vq->vq.vdev, desc->addr),
-                                virtio32_to_cpu(vq->vq.vdev, desc->len),
-                                (flags & VRING_DESC_F_WRITE) ?
-                                DMA_FROM_DEVICE : DMA_TO_DEVICE);
-       } else {
-               dma_unmap_page(vring_dma_dev(vq),
-                              virtio64_to_cpu(vq->vq.vdev, desc->addr),
-                              virtio32_to_cpu(vq->vq.vdev, desc->len),
-                              (flags & VRING_DESC_F_WRITE) ?
-                              DMA_FROM_DEVICE : DMA_TO_DEVICE);
-       }
+       dma_unmap_page(vring_dma_dev(vq),
+                      virtio64_to_cpu(vq->vq.vdev, desc->addr),
+                      virtio32_to_cpu(vq->vq.vdev, desc->len),
+                      (flags & VRING_DESC_F_WRITE) ?
+                      DMA_FROM_DEVICE : DMA_TO_DEVICE);
 }
 
 static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
@@ -984,24 +976,24 @@ static struct virtqueue *vring_create_virtqueue_split(
  * Packed ring specific functions - *_packed().
  */
 
-static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
-                                    struct vring_desc_extra *state)
+static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
+                                    struct vring_desc_extra *extra)
 {
        u16 flags;
 
        if (!vq->use_dma_api)
                return;
 
-       flags = state->flags;
+       flags = extra->flags;
 
        if (flags & VRING_DESC_F_INDIRECT) {
                dma_unmap_single(vring_dma_dev(vq),
-                                state->addr, state->len,
+                                extra->addr, extra->len,
                                 (flags & VRING_DESC_F_WRITE) ?
                                 DMA_FROM_DEVICE : DMA_TO_DEVICE);
        } else {
                dma_unmap_page(vring_dma_dev(vq),
-                              state->addr, state->len,
+                              extra->addr, extra->len,
                               (flags & VRING_DESC_F_WRITE) ?
                               DMA_FROM_DEVICE : DMA_TO_DEVICE);
        }
@@ -1017,19 +1009,11 @@ static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
 
        flags = le16_to_cpu(desc->flags);
 
-       if (flags & VRING_DESC_F_INDIRECT) {
-               dma_unmap_single(vring_dma_dev(vq),
-                                le64_to_cpu(desc->addr),
-                                le32_to_cpu(desc->len),
-                                (flags & VRING_DESC_F_WRITE) ?
-                                DMA_FROM_DEVICE : DMA_TO_DEVICE);
-       } else {
-               dma_unmap_page(vring_dma_dev(vq),
-                              le64_to_cpu(desc->addr),
-                              le32_to_cpu(desc->len),
-                              (flags & VRING_DESC_F_WRITE) ?
-                              DMA_FROM_DEVICE : DMA_TO_DEVICE);
-       }
+       dma_unmap_page(vring_dma_dev(vq),
+                      le64_to_cpu(desc->addr),
+                      le32_to_cpu(desc->len),
+                      (flags & VRING_DESC_F_WRITE) ?
+                      DMA_FROM_DEVICE : DMA_TO_DEVICE);
 }
 
 static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
@@ -1303,8 +1287,7 @@ unmap_release:
        for (n = 0; n < total_sg; n++) {
                if (i == err_idx)
                        break;
-               vring_unmap_state_packed(vq,
-                                        &vq->packed.desc_extra[curr]);
+               vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]);
                curr = vq->packed.desc_extra[curr].next;
                i++;
                if (i >= vq->packed.vring.num)
@@ -1383,8 +1366,8 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
        if (unlikely(vq->use_dma_api)) {
                curr = id;
                for (i = 0; i < state->num; i++) {
-                       vring_unmap_state_packed(vq,
-                               &vq->packed.desc_extra[curr]);
+                       vring_unmap_extra_packed(vq,
+                                                &vq->packed.desc_extra[curr]);
                        curr = vq->packed.desc_extra[curr].next;
                }
        }
index 085f5a4ad06c10eb711b5c41047f49fa6b741b69..c4e82a8d863f8dded3bb044df5ef007ac0132efe 100644 (file)
@@ -1779,7 +1779,7 @@ config BCM7038_WDT
        tristate "BCM63xx/BCM7038 Watchdog"
        select WATCHDOG_CORE
        depends on HAS_IOMEM
-       depends on ARCH_BRCMSTB || BMIPS_GENERIC || BCM63XX || COMPILE_TEST
+       depends on ARCH_BCM4908 || ARCH_BRCMSTB || BMIPS_GENERIC || BCM63XX || COMPILE_TEST
        help
          Watchdog driver for the built-in hardware in Broadcom 7038 and
          later SoCs used in set-top boxes.  BCM7038 was made public
index 436571b6fc7941ff24249f6277b8096fc457b6bc..bd06622813eb4ba5d7ef360f93b4dc954c260472 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/watchdog.h>
 
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+                               __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
 struct aspeed_wdt {
        struct watchdog_device  wdd;
        void __iomem            *base;
@@ -266,6 +271,8 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
        wdt->wdd.timeout = WDT_DEFAULT_TIMEOUT;
        watchdog_init_timeout(&wdt->wdd, 0, dev);
 
+       watchdog_set_nowayout(&wdt->wdd, nowayout);
+
        np = dev->of_node;
 
        ofdid = of_match_node(aspeed_wdt_of_table, np);
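
For context, watchdog_set_nowayout() latches WDOG_NO_WAY_OUT in the device
status, after which the watchdog core refuses to stop the device. A sketch of
the core-side check (paraphrased from drivers/watchdog/watchdog_dev.c; the
function name here is illustrative):

	static int watchdog_stop_sketch(struct watchdog_device *wdd)
	{
		if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
			pr_info("watchdog%d: nowayout prevents watchdog being stopped!\n",
				wdd->id);
			return -EBUSY;
		}
		/* ...otherwise the driver's ->stop() or a keepalive is used... */
		return 0;
	}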
index 51bfb796898b69a9a7b42235a64b7a712c3e3413..d0c5d47ddede26f1e02c34c9250a388096598669 100644 (file)
@@ -66,6 +66,7 @@ struct imx2_wdt_device {
        struct watchdog_device wdog;
        bool ext_reset;
        bool clk_is_on;
+       bool no_ping;
 };
 
 static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -312,12 +313,18 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
 
        wdev->ext_reset = of_property_read_bool(dev->of_node,
                                                "fsl,ext-reset-output");
+       /*
+        * The i.MX7D doesn't support low power mode, so we need to ping the watchdog
+        * during suspend.
+        */
+       wdev->no_ping = !of_device_is_compatible(dev->of_node, "fsl,imx7d-wdt");
        platform_set_drvdata(pdev, wdog);
        watchdog_set_drvdata(wdog, wdev);
        watchdog_set_nowayout(wdog, nowayout);
        watchdog_set_restart_priority(wdog, 128);
        watchdog_init_timeout(wdog, timeout, dev);
-       watchdog_stop_ping_on_suspend(wdog);
+       if (wdev->no_ping)
+               watchdog_stop_ping_on_suspend(wdog);
 
        if (imx2_wdt_is_running(wdev)) {
                imx2_wdt_set_timeout(wdog, wdog->timeout);
@@ -366,9 +373,11 @@ static int __maybe_unused imx2_wdt_suspend(struct device *dev)
                imx2_wdt_ping(wdog);
        }
 
-       clk_disable_unprepare(wdev->clk);
+       if (wdev->no_ping) {
+               clk_disable_unprepare(wdev->clk);
 
-       wdev->clk_is_on = false;
+               wdev->clk_is_on = false;
+       }
 
        return 0;
 }
@@ -380,11 +389,14 @@ static int __maybe_unused imx2_wdt_resume(struct device *dev)
        struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
        int ret;
 
-       ret = clk_prepare_enable(wdev->clk);
-       if (ret)
-               return ret;
+       if (wdev->no_ping) {
+               ret = clk_prepare_enable(wdev->clk);
 
-       wdev->clk_is_on = true;
+               if (ret)
+                       return ret;
+
+               wdev->clk_is_on = true;
+       }
 
        if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) {
                /*
@@ -407,6 +419,7 @@ static SIMPLE_DEV_PM_OPS(imx2_wdt_pm_ops, imx2_wdt_suspend,
 
 static const struct of_device_id imx2_wdt_dt_ids[] = {
        { .compatible = "fsl,imx21-wdt", },
+       { .compatible = "fsl,imx7d-wdt", },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, imx2_wdt_dt_ids);
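
Quick reference for the new flag, since the negation is easy to misread
(illustrative summary, not part of the patch):

	/*
	 * no_ping semantics:
	 *   fsl,imx21-wdt -> no_ping = true : the core stops pinging over
	 *                    suspend and the driver gates the clock off.
	 *   fsl,imx7d-wdt -> no_ping = false: the clock stays on, and the
	 *                    suspend/resume paths keep the still-running
	 *                    WDOG fed, as it cannot stop in low power mode.
	 */
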
index 31b03fa713412dd0b06e770efddb7d950c583e09..281a48d9889fcc5ed889587d76b1c1f39de0bfba 100644 (file)
@@ -84,10 +84,24 @@ static int ixp4xx_wdt_set_timeout(struct watchdog_device *wdd,
        return 0;
 }
 
+static int ixp4xx_wdt_restart(struct watchdog_device *wdd,
+                              unsigned long action, void *data)
+{
+       struct ixp4xx_wdt *iwdt = to_ixp4xx_wdt(wdd);
+
+       __raw_writel(IXP4XX_WDT_KEY, iwdt->base + IXP4XX_OSWK_OFFSET);
+       __raw_writel(0, iwdt->base + IXP4XX_OSWT_OFFSET);
+       __raw_writel(IXP4XX_WDT_COUNT_ENABLE | IXP4XX_WDT_RESET_ENABLE,
+                    iwdt->base + IXP4XX_OSWE_OFFSET);
+
+       return 0;
+}
+
 static const struct watchdog_ops ixp4xx_wdt_ops = {
        .start = ixp4xx_wdt_start,
        .stop = ixp4xx_wdt_stop,
        .set_timeout = ixp4xx_wdt_set_timeout,
+       .restart = ixp4xx_wdt_restart,
        .owner = THIS_MODULE,
 };
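
The new .restart hook is called from the watchdog core's reboot path; how
strongly it is preferred over other restart handlers depends on the priority
the driver registers. A hypothetical probe-time call using the standard
helper (the value 128 and the surrounding names are illustrative):

	watchdog_set_restart_priority(&iwdt->wdd, 128);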
 
index 127eefc9161d2e3cba02eef4878f9de930cf86c9..e25e6bf4647f804edf26187b439b6dcbb37852c0 100644 (file)
@@ -238,8 +238,10 @@ static int armada370_start(struct watchdog_device *wdt_dev)
        atomic_io_modify(dev->reg + TIMER_A370_STATUS, WDT_A370_EXPIRED, 0);
 
        /* Enable watchdog timer */
-       atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit,
-                                               dev->data->wdt_enable_bit);
+       reg = dev->data->wdt_enable_bit;
+       if (dev->wdt.info->options & WDIOF_PRETIMEOUT)
+               reg |= TIMER1_ENABLE_BIT;
+       atomic_io_modify(dev->reg + TIMER_CTRL, reg, reg);
 
        /* Enable reset on watchdog */
        reg = readl(dev->rstout);
@@ -312,7 +314,7 @@ static int armada375_stop(struct watchdog_device *wdt_dev)
 static int armada370_stop(struct watchdog_device *wdt_dev)
 {
        struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
-       u32 reg;
+       u32 reg, mask;
 
        /* Disable reset on watchdog */
        reg = readl(dev->rstout);
@@ -320,7 +322,10 @@ static int armada370_stop(struct watchdog_device *wdt_dev)
        writel(reg, dev->rstout);
 
        /* Disable watchdog timer */
-       atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit, 0);
+       mask = dev->data->wdt_enable_bit;
+       if (wdt_dev->info->options & WDIOF_PRETIMEOUT)
+               mask |= TIMER1_ENABLE_BIT;
+       atomic_io_modify(dev->reg + TIMER_CTRL, mask, 0);
 
        return 0;
 }
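
Enabling TIMER1 together with the watchdog implies a matching interrupt
handler that signals the core before the final expiry. A sketch of the shape
such a pretimeout ISR takes (the status-register details are illustrative):

	static irqreturn_t orion_wdt_pre_irq(int irq, void *devid)
	{
		struct orion_watchdog *dev = devid;

		/* Ack TIMER1 and let the core log or panic ahead of reset. */
		atomic_io_modify(dev->reg + TIMER_A370_STATUS,
				 WDT_A370_EXPIRED, 0);
		watchdog_notify_pretimeout(&dev->wdt);
		return IRQ_HANDLED;
	}
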
index 5791198960e6c5bf6d766c7eb2f5450513230897..41d58ea5eb2f40fd0f496c9cebe0c342c176ba54 100644 (file)
@@ -327,6 +327,7 @@ static SIMPLE_DEV_PM_OPS(rwdt_pm_ops, rwdt_suspend, rwdt_resume);
 static const struct of_device_id rwdt_ids[] = {
        { .compatible = "renesas,rcar-gen2-wdt", },
        { .compatible = "renesas,rcar-gen3-wdt", },
+       { .compatible = "renesas,rcar-gen4-wdt", },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, rwdt_ids);
index 117bc2a8eb0a46be764a1ff667d9c7693c997074..db843f82586026be10443dbc4f2b3feec2bae9b7 100644 (file)
@@ -228,6 +228,7 @@ static int rti_wdt_probe(struct platform_device *pdev)
        ret = pm_runtime_get_sync(dev);
        if (ret) {
                pm_runtime_put_noidle(dev);
+               pm_runtime_disable(&pdev->dev);
                return dev_err_probe(dev, ret, "runtime pm failed\n");
        }
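
The added pm_runtime_disable() balances the pm_runtime_enable() performed
earlier in probe; without it, a failed probe leaks the enable count. The
general error-path pattern (a sketch, assuming the usual probe structure):

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret) {
		pm_runtime_put_noidle(dev);	/* undo the usage count bump */
		pm_runtime_disable(dev);	/* balance pm_runtime_enable() */
		return dev_err_probe(dev, ret, "runtime pm failed\n");
	}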
 
index dd9a744f82f850c0a70df0e343b6d202e078d109..86ffb58fbc8549448f2323a3c6ccf3e0a13294aa 100644 (file)
@@ -49,7 +49,7 @@
 /* internal variables */
 
 enum tco_reg_layout {
-       sp5100, sb800, efch
+       sp5100, sb800, efch, efch_mmio
 };
 
 struct sp5100_tco {
@@ -86,6 +86,10 @@ static enum tco_reg_layout tco_reg_layout(struct pci_dev *dev)
            dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
            dev->revision < 0x40) {
                return sp5100;
+       } else if (dev->vendor == PCI_VENDOR_ID_AMD &&
+           sp5100_tco_pci->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
+           sp5100_tco_pci->revision >= AMD_ZEN_SMBUS_PCI_REV) {
+               return efch_mmio;
        } else if (dev->vendor == PCI_VENDOR_ID_AMD &&
            ((dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
             dev->revision >= 0x41) ||
@@ -209,6 +213,8 @@ static void tco_timer_enable(struct sp5100_tco *tco)
                                          ~EFCH_PM_WATCHDOG_DISABLE,
                                          EFCH_PM_DECODEEN_SECOND_RES);
                break;
+       default:
+               break;
        }
 }
 
@@ -223,14 +229,195 @@ static u32 sp5100_tco_read_pm_reg32(u8 index)
        return val;
 }
 
+static u32 sp5100_tco_request_region(struct device *dev,
+                                    u32 mmio_addr,
+                                    const char *dev_name)
+{
+       if (!devm_request_mem_region(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE,
+                                    dev_name)) {
+               dev_dbg(dev, "MMIO address 0x%08x already in use\n", mmio_addr);
+               return 0;
+       }
+
+       return mmio_addr;
+}
+
+static int sp5100_tco_prepare_base(struct sp5100_tco *tco,
+                                  u32 mmio_addr,
+                                  u32 alt_mmio_addr,
+                                  const char *dev_name)
+{
+       struct device *dev = tco->wdd.parent;
+
+       dev_dbg(dev, "Got 0x%08x from SBResource_MMIO register\n", mmio_addr);
+
+       if (!mmio_addr && !alt_mmio_addr)
+               return -ENODEV;
+
+       /* Check for MMIO address and alternate MMIO address conflicts */
+       if (mmio_addr)
+               mmio_addr = sp5100_tco_request_region(dev, mmio_addr, dev_name);
+
+       if (!mmio_addr && alt_mmio_addr)
+               mmio_addr = sp5100_tco_request_region(dev, alt_mmio_addr, dev_name);
+
+       if (!mmio_addr) {
+               dev_err(dev, "Failed to reserve MMIO or alternate MMIO region\n");
+               return -EBUSY;
+       }
+
+       tco->tcobase = devm_ioremap(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE);
+       if (!tco->tcobase) {
+               dev_err(dev, "MMIO address 0x%08x failed mapping\n", mmio_addr);
+               devm_release_mem_region(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE);
+               return -ENOMEM;
+       }
+
+       dev_info(dev, "Using 0x%08x for watchdog MMIO address\n", mmio_addr);
+
+       return 0;
+}
+
+static int sp5100_tco_timer_init(struct sp5100_tco *tco)
+{
+       struct watchdog_device *wdd = &tco->wdd;
+       struct device *dev = wdd->parent;
+       u32 val;
+
+       val = readl(SP5100_WDT_CONTROL(tco->tcobase));
+       if (val & SP5100_WDT_DISABLED) {
+               dev_err(dev, "Watchdog hardware is disabled\n");
+               return -ENODEV;
+       }
+
+       /*
+        * Save WatchDogFired status, because WatchDogFired flag is
+        * cleared here.
+        */
+       if (val & SP5100_WDT_FIRED)
+               wdd->bootstatus = WDIOF_CARDRESET;
+
+       /* Set watchdog action to reset the system */
+       val &= ~SP5100_WDT_ACTION_RESET;
+       writel(val, SP5100_WDT_CONTROL(tco->tcobase));
+
+       /* Set a reasonable heartbeat before we stop the timer */
+       tco_timer_set_timeout(wdd, wdd->timeout);
+
+       /*
+        * Stop the TCO before we change anything so we don't race with
+        * a zeroed timer.
+        */
+       tco_timer_stop(wdd);
+
+       return 0;
+}
+
+static u8 efch_read_pm_reg8(void __iomem *addr, u8 index)
+{
+       return readb(addr + index);
+}
+
+static void efch_update_pm_reg8(void __iomem *addr, u8 index, u8 reset, u8 set)
+{
+       u8 val;
+
+       val = readb(addr + index);
+       val &= reset;
+       val |= set;
+       writeb(val, addr + index);
+}
+
+static void tco_timer_enable_mmio(void __iomem *addr)
+{
+       efch_update_pm_reg8(addr, EFCH_PM_DECODEEN3,
+                           ~EFCH_PM_WATCHDOG_DISABLE,
+                           EFCH_PM_DECODEEN_SECOND_RES);
+}
+
+static int sp5100_tco_setupdevice_mmio(struct device *dev,
+                                      struct watchdog_device *wdd)
+{
+       struct sp5100_tco *tco = watchdog_get_drvdata(wdd);
+       const char *dev_name = SB800_DEVNAME;
+       u32 mmio_addr = 0, alt_mmio_addr = 0;
+       struct resource *res;
+       void __iomem *addr;
+       int ret;
+       u32 val;
+
+       res = request_mem_region_muxed(EFCH_PM_ACPI_MMIO_PM_ADDR,
+                                      EFCH_PM_ACPI_MMIO_PM_SIZE,
+                                      "sp5100_tco");
+
+       if (!res) {
+               dev_err(dev,
+                       "Memory region 0x%08x already in use\n",
+                       EFCH_PM_ACPI_MMIO_PM_ADDR);
+               return -EBUSY;
+       }
+
+       addr = ioremap(EFCH_PM_ACPI_MMIO_PM_ADDR, EFCH_PM_ACPI_MMIO_PM_SIZE);
+       if (!addr) {
+               dev_err(dev, "Address mapping failed\n");
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /*
+        * EFCH_PM_DECODEEN_WDT_TMREN is dual purpose. This bitfield
+        * enables sp5100_tco register MMIO space decoding. The bitfield
+        * also starts the timer operation. Enable if not already enabled.
+        */
+       val = efch_read_pm_reg8(addr, EFCH_PM_DECODEEN);
+       if (!(val & EFCH_PM_DECODEEN_WDT_TMREN)) {
+               efch_update_pm_reg8(addr, EFCH_PM_DECODEEN, 0xff,
+                                   EFCH_PM_DECODEEN_WDT_TMREN);
+       }
+
+       /* Error if the timer could not be enabled */
+       val = efch_read_pm_reg8(addr, EFCH_PM_DECODEEN);
+       if (!(val & EFCH_PM_DECODEEN_WDT_TMREN)) {
+               dev_err(dev, "Failed to enable the timer\n");
+               ret = -EFAULT;
+               goto out;
+       }
+
+       mmio_addr = EFCH_PM_WDT_ADDR;
+
+       /* Determine alternate MMIO base address */
+       val = efch_read_pm_reg8(addr, EFCH_PM_ISACONTROL);
+       if (val & EFCH_PM_ISACONTROL_MMIOEN)
+               alt_mmio_addr = EFCH_PM_ACPI_MMIO_ADDR +
+                       EFCH_PM_ACPI_MMIO_WDT_OFFSET;
+
+       ret = sp5100_tco_prepare_base(tco, mmio_addr, alt_mmio_addr, dev_name);
+       if (!ret) {
+               tco_timer_enable_mmio(addr);
+               ret = sp5100_tco_timer_init(tco);
+       }
+
+out:
+       if (addr)
+               iounmap(addr);
+
+       release_resource(res);
+
+       return ret;
+}
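
Note the resource lifetime in the helper above: the PM index window is shared
with other FCH consumers, so it is claimed with request_mem_region_muxed(),
mapped only for the duration of setup, then unmapped and released again.
Condensed to its skeleton (a sketch reusing the helpers defined above):

	struct resource *res;
	void __iomem *addr;

	res = request_mem_region_muxed(EFCH_PM_ACPI_MMIO_PM_ADDR,
				       EFCH_PM_ACPI_MMIO_PM_SIZE, "sp5100_tco");
	if (res) {
		addr = ioremap(EFCH_PM_ACPI_MMIO_PM_ADDR,
			       EFCH_PM_ACPI_MMIO_PM_SIZE);
		if (addr) {
			efch_update_pm_reg8(addr, EFCH_PM_DECODEEN, 0xff,
					    EFCH_PM_DECODEEN_WDT_TMREN);
			iounmap(addr);
		}
		release_resource(res);
	}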
+
 static int sp5100_tco_setupdevice(struct device *dev,
                                  struct watchdog_device *wdd)
 {
        struct sp5100_tco *tco = watchdog_get_drvdata(wdd);
        const char *dev_name;
        u32 mmio_addr = 0, val;
+       u32 alt_mmio_addr = 0;
        int ret;
 
+       if (tco->tco_reg_layout == efch_mmio)
+               return sp5100_tco_setupdevice_mmio(dev, wdd);
+
        /* Request the IO ports used by this driver */
        if (!request_muxed_region(SP5100_IO_PM_INDEX_REG,
                                  SP5100_PM_IOPORTS_SIZE, "sp5100_tco")) {
@@ -247,138 +434,55 @@ static int sp5100_tco_setupdevice(struct device *dev,
                dev_name = SP5100_DEVNAME;
                mmio_addr = sp5100_tco_read_pm_reg32(SP5100_PM_WATCHDOG_BASE) &
                                                                0xfffffff8;
+
+               /*
+                * Secondly, find the watchdog timer MMIO address
+                * from SBResource_MMIO register.
+                */
+
+               /* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
+               pci_read_config_dword(sp5100_tco_pci,
+                                     SP5100_SB_RESOURCE_MMIO_BASE,
+                                     &val);
+
+               /* Verify MMIO is enabled and using bar0 */
+               if ((val & SB800_ACPI_MMIO_MASK) == SB800_ACPI_MMIO_DECODE_EN)
+                       alt_mmio_addr = (val & ~0xfff) + SB800_PM_WDT_MMIO_OFFSET;
                break;
        case sb800:
                dev_name = SB800_DEVNAME;
                mmio_addr = sp5100_tco_read_pm_reg32(SB800_PM_WATCHDOG_BASE) &
                                                                0xfffffff8;
+
+               /* Read SBResource_MMIO from AcpiMmioEn(PM_Reg: 24h) */
+               val = sp5100_tco_read_pm_reg32(SB800_PM_ACPI_MMIO_EN);
+
+               /* Verify MMIO is enabled and using bar0 */
+               if ((val & SB800_ACPI_MMIO_MASK) == SB800_ACPI_MMIO_DECODE_EN)
+                       alt_mmio_addr = (val & ~0xfff) + SB800_PM_WDT_MMIO_OFFSET;
                break;
        case efch:
                dev_name = SB800_DEVNAME;
-               /*
-                * On Family 17h devices, the EFCH_PM_DECODEEN_WDT_TMREN bit of
-                * EFCH_PM_DECODEEN not only enables the EFCH_PM_WDT_ADDR memory
-                * region, it also enables the watchdog itself.
-                */
-               if (boot_cpu_data.x86 == 0x17) {
-                       val = sp5100_tco_read_pm_reg8(EFCH_PM_DECODEEN);
-                       if (!(val & EFCH_PM_DECODEEN_WDT_TMREN)) {
-                               sp5100_tco_update_pm_reg8(EFCH_PM_DECODEEN, 0xff,
-                                                         EFCH_PM_DECODEEN_WDT_TMREN);
-                       }
-               }
                val = sp5100_tco_read_pm_reg8(EFCH_PM_DECODEEN);
                if (val & EFCH_PM_DECODEEN_WDT_TMREN)
                        mmio_addr = EFCH_PM_WDT_ADDR;
+
+               val = sp5100_tco_read_pm_reg8(EFCH_PM_ISACONTROL);
+               if (val & EFCH_PM_ISACONTROL_MMIOEN)
+                       alt_mmio_addr = EFCH_PM_ACPI_MMIO_ADDR +
+                               EFCH_PM_ACPI_MMIO_WDT_OFFSET;
                break;
        default:
                return -ENODEV;
        }
 
-       /* Check MMIO address conflict */
-       if (!mmio_addr ||
-           !devm_request_mem_region(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE,
-                                    dev_name)) {
-               if (mmio_addr)
-                       dev_dbg(dev, "MMIO address 0x%08x already in use\n",
-                               mmio_addr);
-               switch (tco->tco_reg_layout) {
-               case sp5100:
-                       /*
-                        * Secondly, Find the watchdog timer MMIO address
-                        * from SBResource_MMIO register.
-                        */
-                       /* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
-                       pci_read_config_dword(sp5100_tco_pci,
-                                             SP5100_SB_RESOURCE_MMIO_BASE,
-                                             &mmio_addr);
-                       if ((mmio_addr & (SB800_ACPI_MMIO_DECODE_EN |
-                                         SB800_ACPI_MMIO_SEL)) !=
-                                                 SB800_ACPI_MMIO_DECODE_EN) {
-                               ret = -ENODEV;
-                               goto unreg_region;
-                       }
-                       mmio_addr &= ~0xFFF;
-                       mmio_addr += SB800_PM_WDT_MMIO_OFFSET;
-                       break;
-               case sb800:
-                       /* Read SBResource_MMIO from AcpiMmioEn(PM_Reg: 24h) */
-                       mmio_addr =
-                               sp5100_tco_read_pm_reg32(SB800_PM_ACPI_MMIO_EN);
-                       if ((mmio_addr & (SB800_ACPI_MMIO_DECODE_EN |
-                                         SB800_ACPI_MMIO_SEL)) !=
-                                                 SB800_ACPI_MMIO_DECODE_EN) {
-                               ret = -ENODEV;
-                               goto unreg_region;
-                       }
-                       mmio_addr &= ~0xFFF;
-                       mmio_addr += SB800_PM_WDT_MMIO_OFFSET;
-                       break;
-               case efch:
-                       val = sp5100_tco_read_pm_reg8(EFCH_PM_ISACONTROL);
-                       if (!(val & EFCH_PM_ISACONTROL_MMIOEN)) {
-                               ret = -ENODEV;
-                               goto unreg_region;
-                       }
-                       mmio_addr = EFCH_PM_ACPI_MMIO_ADDR +
-                                   EFCH_PM_ACPI_MMIO_WDT_OFFSET;
-                       break;
-               }
-               dev_dbg(dev, "Got 0x%08x from SBResource_MMIO register\n",
-                       mmio_addr);
-               if (!devm_request_mem_region(dev, mmio_addr,
-                                            SP5100_WDT_MEM_MAP_SIZE,
-                                            dev_name)) {
-                       dev_dbg(dev, "MMIO address 0x%08x already in use\n",
-                               mmio_addr);
-                       ret = -EBUSY;
-                       goto unreg_region;
-               }
-       }
-
-       tco->tcobase = devm_ioremap(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE);
-       if (!tco->tcobase) {
-               dev_err(dev, "failed to get tcobase address\n");
-               ret = -ENOMEM;
-               goto unreg_region;
-       }
-
-       dev_info(dev, "Using 0x%08x for watchdog MMIO address\n", mmio_addr);
-
-       /* Setup the watchdog timer */
-       tco_timer_enable(tco);
-
-       val = readl(SP5100_WDT_CONTROL(tco->tcobase));
-       if (val & SP5100_WDT_DISABLED) {
-               dev_err(dev, "Watchdog hardware is disabled\n");
-               ret = -ENODEV;
-               goto unreg_region;
+       ret = sp5100_tco_prepare_base(tco, mmio_addr, alt_mmio_addr, dev_name);
+       if (!ret) {
+               /* Setup the watchdog timer */
+               tco_timer_enable(tco);
+               ret = sp5100_tco_timer_init(tco);
        }
 
-       /*
-        * Save WatchDogFired status, because WatchDogFired flag is
-        * cleared here.
-        */
-       if (val & SP5100_WDT_FIRED)
-               wdd->bootstatus = WDIOF_CARDRESET;
-       /* Set watchdog action to reset the system */
-       val &= ~SP5100_WDT_ACTION_RESET;
-       writel(val, SP5100_WDT_CONTROL(tco->tcobase));
-
-       /* Set a reasonable heartbeat before we stop the timer */
-       tco_timer_set_timeout(wdd, wdd->timeout);
-
-       /*
-        * Stop the TCO before we change anything so we don't race with
-        * a zeroed timer.
-        */
-       tco_timer_stop(wdd);
-
-       release_region(SP5100_IO_PM_INDEX_REG, SP5100_PM_IOPORTS_SIZE);
-
-       return 0;
-
-unreg_region:
        release_region(SP5100_IO_PM_INDEX_REG, SP5100_PM_IOPORTS_SIZE);
        return ret;
 }
index adf015aa4126f3351ccb1e70682c2fa5599c32c3..6a0986d2c94b7de92618bfeaedca2dbe2a0d0b55 100644 (file)
@@ -58,6 +58,7 @@
 #define SB800_PM_WATCHDOG_SECOND_RES   GENMASK(1, 0)
 #define SB800_ACPI_MMIO_DECODE_EN      BIT(0)
 #define SB800_ACPI_MMIO_SEL            BIT(1)
+#define SB800_ACPI_MMIO_MASK           GENMASK(1, 0)
 
 #define SB800_PM_WDT_MMIO_OFFSET       0xB00
 
 #define EFCH_PM_ISACONTROL_MMIOEN      BIT(1)
 
 #define EFCH_PM_ACPI_MMIO_ADDR         0xfed80000
+#define EFCH_PM_ACPI_MMIO_PM_OFFSET    0x00000300
 #define EFCH_PM_ACPI_MMIO_WDT_OFFSET   0x00000b00
+
+#define EFCH_PM_ACPI_MMIO_PM_ADDR      (EFCH_PM_ACPI_MMIO_ADDR +       \
+                                        EFCH_PM_ACPI_MMIO_PM_OFFSET)
+#define EFCH_PM_ACPI_MMIO_PM_SIZE      8
+#define AMD_ZEN_SMBUS_PCI_REV          0x51
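
For orientation, the composed constant resolves to 0xfed80300 (the EFCH ACPI
MMIO block at 0xfed80000 plus the 0x300 PM offset). An illustrative
compile-time check:

	#include <linux/build_bug.h>

	static_assert(EFCH_PM_ACPI_MMIO_PM_ADDR == 0xfed80300);
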
index 3a3d8b5c7ad5900c959c8662da15649abe81e778..54903f3c851eb953d379161e225f0adbba2dd217 100644 (file)
@@ -171,17 +171,17 @@ static int __watchdog_ping(struct watchdog_device *wdd)
 }
 
 /*
- *     watchdog_ping: ping the watchdog.
- *     @wdd: the watchdog device to ping
+ * watchdog_ping - ping the watchdog
+ * @wdd: The watchdog device to ping
  *
- *     The caller must hold wd_data->lock.
+ * If the watchdog has no ping operation of its own then it needs to be
+ * restarted via the start operation. This wrapper function does
+ * exactly that.
+ * We only ping when the watchdog device is running.
+ * The caller must hold wd_data->lock.
  *
- *     If the watchdog has no own ping operation then it needs to be
- *     restarted via the start operation. This wrapper function does
- *     exactly that.
- *     We only ping when the watchdog device is running.
+ * Return: 0 on success, error otherwise.
  */
-
 static int watchdog_ping(struct watchdog_device *wdd)
 {
        struct watchdog_core_data *wd_data = wdd->wd_data;
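
The comment rewrites throughout this file apply one consistent kernel-doc
layout: a one-line summary with a dash, @param lines, the body (including the
locking rule), then a Return: section. Plain /* comments are kept for static
helpers while exported symbols below are promoted to /** kernel-doc. The
template, roughly:

	/*
	 * func_name - one-line summary
	 * @arg: Description of @arg
	 *
	 * Longer description, e.g. "The caller must hold wd_data->lock."
	 *
	 * Return: 0 on success, error otherwise.
	 */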
@@ -231,16 +231,14 @@ static enum hrtimer_restart watchdog_timer_expired(struct hrtimer *timer)
 }
 
 /*
- *     watchdog_start: wrapper to start the watchdog.
- *     @wdd: the watchdog device to start
+ * watchdog_start - wrapper to start the watchdog
+ * @wdd: The watchdog device to start
  *
- *     The caller must hold wd_data->lock.
+ * Start the watchdog if it is not active and mark it active.
+ * The caller must hold wd_data->lock.
  *
- *     Start the watchdog if it is not active and mark it active.
- *     This function returns zero on success or a negative errno code for
- *     failure.
+ * Return: 0 on success or a negative errno code for failure.
  */
-
 static int watchdog_start(struct watchdog_device *wdd)
 {
        struct watchdog_core_data *wd_data = wdd->wd_data;
@@ -274,17 +272,15 @@ static int watchdog_start(struct watchdog_device *wdd)
 }
 
 /*
- *     watchdog_stop: wrapper to stop the watchdog.
- *     @wdd: the watchdog device to stop
+ * watchdog_stop - wrapper to stop the watchdog
+ * @wdd: The watchdog device to stop
  *
- *     The caller must hold wd_data->lock.
+ * Stop the watchdog if it is still active and unmark it active.
+ * If the 'nowayout' feature was set, the watchdog cannot be stopped.
+ * The caller must hold wd_data->lock.
  *
- *     Stop the watchdog if it is still active and unmark it active.
- *     This function returns zero on success or a negative errno code for
- *     failure.
- *     If the 'nowayout' feature was set, the watchdog cannot be stopped.
+ * Return: 0 on success or a negative errno code for failure.
  */
-
 static int watchdog_stop(struct watchdog_device *wdd)
 {
        int err = 0;
@@ -315,14 +311,14 @@ static int watchdog_stop(struct watchdog_device *wdd)
 }
 
 /*
- *     watchdog_get_status: wrapper to get the watchdog status
- *     @wdd: the watchdog device to get the status from
+ * watchdog_get_status - wrapper to get the watchdog status
+ * @wdd: The watchdog device to get the status from
  *
- *     The caller must hold wd_data->lock.
+ * Get the watchdog's status flags.
+ * The caller must hold wd_data->lock.
  *
- *     Get the watchdog's status flags.
+ * Return: watchdog's status flags.
  */
-
 static unsigned int watchdog_get_status(struct watchdog_device *wdd)
 {
        struct watchdog_core_data *wd_data = wdd->wd_data;
@@ -352,13 +348,14 @@ static unsigned int watchdog_get_status(struct watchdog_device *wdd)
 }
 
 /*
- *     watchdog_set_timeout: set the watchdog timer timeout
- *     @wdd: the watchdog device to set the timeout for
- *     @timeout: timeout to set in seconds
+ * watchdog_set_timeout - set the watchdog timer timeout
+ * @wdd:       The watchdog device to set the timeout for
+ * @timeout:   Timeout to set in seconds
+ *
+ * The caller must hold wd_data->lock.
  *
- *     The caller must hold wd_data->lock.
+ * Return: 0 if successful, error otherwise.
  */
-
 static int watchdog_set_timeout(struct watchdog_device *wdd,
                                                        unsigned int timeout)
 {
@@ -385,11 +382,12 @@ static int watchdog_set_timeout(struct watchdog_device *wdd,
 }
 
 /*
- *     watchdog_set_pretimeout: set the watchdog timer pretimeout
- *     @wdd: the watchdog device to set the timeout for
- *     @timeout: pretimeout to set in seconds
+ * watchdog_set_pretimeout - set the watchdog timer pretimeout
+ * @wdd:       The watchdog device to set the timeout for
+ * @timeout:   pretimeout to set in seconds
+ *
+ * Return: 0 if successful, error otherwise.
  */
-
 static int watchdog_set_pretimeout(struct watchdog_device *wdd,
                                   unsigned int timeout)
 {
@@ -410,15 +408,15 @@ static int watchdog_set_pretimeout(struct watchdog_device *wdd,
 }
 
 /*
- *     watchdog_get_timeleft: wrapper to get the time left before a reboot
- *     @wdd: the watchdog device to get the remaining time from
- *     @timeleft: the time that's left
+ * watchdog_get_timeleft - wrapper to get the time left before a reboot
+ * @wdd:       The watchdog device to get the remaining time from
+ * @timeleft:  The time that's left
  *
- *     The caller must hold wd_data->lock.
+ * Get the time before a watchdog will reboot (if not pinged).
+ * The caller must hold wd_data->lock.
  *
- *     Get the time before a watchdog will reboot (if not pinged).
+ * Return: 0 if successful, error otherwise.
  */
-
 static int watchdog_get_timeleft(struct watchdog_device *wdd,
                                                        unsigned int *timeleft)
 {
@@ -635,14 +633,15 @@ __ATTRIBUTE_GROUPS(wdt);
 #endif
 
 /*
- *     watchdog_ioctl_op: call the watchdog drivers ioctl op if defined
- *     @wdd: the watchdog device to do the ioctl on
- *     @cmd: watchdog command
- *     @arg: argument pointer
+ * watchdog_ioctl_op - call the watchdog drivers ioctl op if defined
+ * @wdd: The watchdog device to do the ioctl on
+ * @cmd: Watchdog command
+ * @arg: Argument pointer
  *
- *     The caller must hold wd_data->lock.
+ * The caller must hold wd_data->lock.
+ *
+ * Return: 0 if successful, error otherwise.
  */
-
 static int watchdog_ioctl_op(struct watchdog_device *wdd, unsigned int cmd,
                                                        unsigned long arg)
 {
@@ -653,17 +652,18 @@ static int watchdog_ioctl_op(struct watchdog_device *wdd, unsigned int cmd,
 }
 
 /*
- *     watchdog_write: writes to the watchdog.
- *     @file: file from VFS
- *     @data: user address of data
- *     @len: length of data
- *     @ppos: pointer to the file offset
+ * watchdog_write - writes to the watchdog
+ * @file:      File from VFS
+ * @data:      User address of data
+ * @len:       Length of data
+ * @ppos:      Pointer to the file offset
  *
- *     A write to a watchdog device is defined as a keepalive ping.
- *     Writing the magic 'V' sequence allows the next close to turn
- *     off the watchdog (if 'nowayout' is not set).
+ * A write to a watchdog device is defined as a keepalive ping.
+ * Writing the magic 'V' sequence allows the next close to turn
+ * off the watchdog (if 'nowayout' is not set).
+ *
+ * Return: @len if successful, error otherwise.
  */
-
 static ssize_t watchdog_write(struct file *file, const char __user *data,
                                                size_t len, loff_t *ppos)
 {
@@ -706,13 +706,15 @@ static ssize_t watchdog_write(struct file *file, const char __user *data,
 }
 
 /*
- *     watchdog_ioctl: handle the different ioctl's for the watchdog device.
- *     @file: file handle to the device
- *     @cmd: watchdog command
- *     @arg: argument pointer
+ * watchdog_ioctl - handle the different ioctl's for the watchdog device
+ * @file:      File handle to the device
+ * @cmd:       Watchdog command
+ * @arg:       Argument pointer
  *
- *     The watchdog API defines a common set of functions for all watchdogs
- *     according to their available features.
+ * The watchdog API defines a common set of functions for all watchdogs
+ * according to their available features.
+ *
+ * Return: 0 if successful, error otherwise.
  */
 
 static long watchdog_ioctl(struct file *file, unsigned int cmd,
@@ -819,15 +821,16 @@ out_ioctl:
 }
 
 /*
- *     watchdog_open: open the /dev/watchdog* devices.
- *     @inode: inode of device
- *     @file: file handle to device
+ * watchdog_open - open the /dev/watchdog* devices
+ * @inode:     Inode of device
+ * @file:      File handle to device
+ *
+ * When the /dev/watchdog* device gets opened, we start the watchdog.
+ * Watch out: the /dev/watchdog device is single open, so we make sure
+ * it can only be opened once.
  *
- *     When the /dev/watchdog* device gets opened, we start the watchdog.
- *     Watch out: the /dev/watchdog device is single open, so we make sure
- *     it can only be opened once.
+ * Return: 0 if successful, error otherwise.
  */
-
 static int watchdog_open(struct inode *inode, struct file *file)
 {
        struct watchdog_core_data *wd_data;
@@ -896,15 +899,16 @@ static void watchdog_core_data_release(struct device *dev)
 }
 
 /*
- *     watchdog_release: release the watchdog device.
- *     @inode: inode of device
- *     @file: file handle to device
+ * watchdog_release - release the watchdog device
+ * @inode:     Inode of device
+ * @file:      File handle to device
+ *
+ * This is the code for when /dev/watchdog gets closed. We will only
+ * stop the watchdog when we have received the magic char (and nowayout
+ * was not set), else the watchdog will keep running.
  *
- *     This is the code for when /dev/watchdog gets closed. We will only
- *     stop the watchdog when we have received the magic char (and nowayout
- *     was not set), else the watchdog will keep running.
+ * Return: always 0.
  */
-
 static int watchdog_release(struct inode *inode, struct file *file)
 {
        struct watchdog_core_data *wd_data = file->private_data;
@@ -977,14 +981,15 @@ static struct class watchdog_class = {
 };
 
 /*
- *     watchdog_cdev_register: register watchdog character device
- *     @wdd: watchdog device
+ * watchdog_cdev_register - register watchdog character device
+ * @wdd: Watchdog device
+ *
+ * Register a watchdog character device including handling the legacy
+ * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
+ * thus we set it up like that.
  *
- *     Register a watchdog character device including handling the legacy
- *     /dev/watchdog node. /dev/watchdog is actually a miscdevice and
- *     thus we set it up like that.
+ * Return: 0 if successful, error otherwise.
  */
-
 static int watchdog_cdev_register(struct watchdog_device *wdd)
 {
        struct watchdog_core_data *wd_data;
@@ -1074,13 +1079,12 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
 }
 
 /*
- *     watchdog_cdev_unregister: unregister watchdog character device
- *     @watchdog: watchdog device
+ * watchdog_cdev_unregister - unregister watchdog character device
+ * @wdd: Watchdog device
  *
- *     Unregister watchdog character device and if needed the legacy
- *     /dev/watchdog device.
+ * Unregister watchdog character device and if needed the legacy
+ * /dev/watchdog device.
  */
-
 static void watchdog_cdev_unregister(struct watchdog_device *wdd)
 {
        struct watchdog_core_data *wd_data = wdd->wd_data;
@@ -1109,15 +1113,16 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
        put_device(&wd_data->dev);
 }
 
-/*
- *     watchdog_dev_register: register a watchdog device
- *     @wdd: watchdog device
+/**
+ * watchdog_dev_register - register a watchdog device
+ * @wdd: Watchdog device
+ *
+ * Register a watchdog device including handling the legacy
+ * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
+ * thus we set it up like that.
  *
- *     Register a watchdog device including handling the legacy
- *     /dev/watchdog node. /dev/watchdog is actually a miscdevice and
- *     thus we set it up like that.
+ * Return: 0 if successful, error otherwise.
  */
-
 int watchdog_dev_register(struct watchdog_device *wdd)
 {
        int ret;
@@ -1133,30 +1138,31 @@ int watchdog_dev_register(struct watchdog_device *wdd)
        return ret;
 }
 
-/*
- *     watchdog_dev_unregister: unregister a watchdog device
- *     @watchdog: watchdog device
+/**
+ * watchdog_dev_unregister - unregister a watchdog device
+ * @wdd: Watchdog device
  *
- *     Unregister watchdog device and if needed the legacy
- *     /dev/watchdog device.
+ * Unregister watchdog device and if needed the legacy
+ * /dev/watchdog device.
  */
-
 void watchdog_dev_unregister(struct watchdog_device *wdd)
 {
        watchdog_unregister_pretimeout(wdd);
        watchdog_cdev_unregister(wdd);
 }
 
-/*
- *     watchdog_set_last_hw_keepalive: set last HW keepalive time for watchdog
- *     @wdd: watchdog device
- *     @last_ping_ms: time since last HW heartbeat
+/**
+ * watchdog_set_last_hw_keepalive - set last HW keepalive time for watchdog
+ * @wdd:               Watchdog device
+ * @last_ping_ms:      Time since last HW heartbeat
  *
- *     Adjusts the last known HW keepalive time for a watchdog timer.
- *     This is needed if the watchdog is already running when the probe
- *     function is called, and it can't be pinged immediately. This
- *     function must be called immediately after watchdog registration,
- *     and min_hw_heartbeat_ms must be set for this to be useful.
+ * Adjusts the last known HW keepalive time for a watchdog timer.
+ * This is needed if the watchdog is already running when the probe
+ * function is called, and it can't be pinged immediately. This
+ * function must be called immediately after watchdog registration,
+ * and min_hw_heartbeat_ms must be set for this to be useful.
+ *
+ * Return: 0 if successful, error otherwise.
  */
 int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd,
                                   unsigned int last_ping_ms)
@@ -1180,12 +1186,13 @@ int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd,
 }
 EXPORT_SYMBOL_GPL(watchdog_set_last_hw_keepalive);
 
-/*
- *     watchdog_dev_init: init dev part of watchdog core
+/**
+ * watchdog_dev_init - init dev part of watchdog core
  *
- *     Allocate a range of chardev nodes to use for watchdog devices
+ * Allocate a range of chardev nodes to use for watchdog devices.
+ *
+ * Return: 0 if successful, error otherwise.
  */
-
 int __init watchdog_dev_init(void)
 {
        int err;
@@ -1218,12 +1225,11 @@ err_register:
        return err;
 }
 
-/*
- *     watchdog_dev_exit: exit dev part of watchdog core
+/**
+ * watchdog_dev_exit - exit dev part of watchdog core
  *
- *     Release the range of chardev nodes used for watchdog devices
+ * Release the range of chardev nodes used for watchdog devices.
  */
-
 void __exit watchdog_dev_exit(void)
 {
        unregister_chrdev_region(watchdog_devt, MAX_DOGS);
index dfe26fa17e95d783728612cfaa22be67d0c10961..617a7f4f07a807e446adde5b2369088e4b65dba3 100644 (file)
@@ -689,29 +689,34 @@ void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
 }
 EXPORT_SYMBOL(xen_free_ballooned_pages);
 
-#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
-static void __init balloon_add_region(unsigned long start_pfn,
-                                     unsigned long pages)
+static void __init balloon_add_regions(void)
 {
+#if defined(CONFIG_XEN_PV)
+       unsigned long start_pfn, pages;
        unsigned long pfn, extra_pfn_end;
+       unsigned int i;
 
-       /*
-        * If the amount of usable memory has been limited (e.g., with
-        * the 'mem' command line parameter), don't add pages beyond
-        * this limit.
-        */
-       extra_pfn_end = min(max_pfn, start_pfn + pages);
+       for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+               pages = xen_extra_mem[i].n_pfns;
+               if (!pages)
+                       continue;
 
-       for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
-               /* totalram_pages and totalhigh_pages do not
-                  include the boot-time balloon extension, so
-                  don't subtract from it. */
-               balloon_append(pfn_to_page(pfn));
-       }
+               start_pfn = xen_extra_mem[i].start_pfn;
 
-       balloon_stats.total_pages += extra_pfn_end - start_pfn;
-}
+               /*
+                * If the amount of usable memory has been limited (e.g., with
+                * the 'mem' command line parameter), don't add pages beyond
+                * this limit.
+                */
+               extra_pfn_end = min(max_pfn, start_pfn + pages);
+
+               for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)
+                       balloon_append(pfn_to_page(pfn));
+
+               balloon_stats.total_pages += extra_pfn_end - start_pfn;
+       }
 #endif
+}
 
 static int __init balloon_init(void)
 {
@@ -745,20 +750,7 @@ static int __init balloon_init(void)
        register_sysctl_table(xen_root);
 #endif
 
-#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
-       {
-               int i;
-
-               /*
-                * Initialize the balloon with pages from the extra memory
-                * regions (see arch/x86/xen/setup.c).
-                */
-               for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
-                       if (xen_extra_mem[i].n_pfns)
-                               balloon_add_region(xen_extra_mem[i].start_pfn,
-                                                  xen_extra_mem[i].n_pfns);
-       }
-#endif
+       balloon_add_regions();
 
        task = kthread_run(balloon_thread, NULL, "xen-balloon");
        if (IS_ERR(task)) {
index a8b41057c382898f04b2a04b207534eb1284ea19..a39f2d36dd9cfc7b4016afedb0c8c9c24f3aa8e4 100644 (file)
@@ -230,39 +230,6 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
 }
 EXPORT_SYMBOL(xen_free_unpopulated_pages);
 
-#ifdef CONFIG_XEN_PV
-static int __init init(void)
-{
-       unsigned int i;
-
-       if (!xen_domain())
-               return -ENODEV;
-
-       if (!xen_pv_domain())
-               return 0;
-
-       /*
-        * Initialize with pages from the extra memory regions (see
-        * arch/x86/xen/setup.c).
-        */
-       for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
-               unsigned int j;
-
-               for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
-                       struct page *pg =
-                               pfn_to_page(xen_extra_mem[i].start_pfn + j);
-
-                       pg->zone_device_data = page_list;
-                       page_list = pg;
-                       list_count++;
-               }
-       }
-
-       return 0;
-}
-subsys_initcall(init);
-#endif
-
 static int __init unpopulated_init(void)
 {
        int ret;
index 55e108e5e1335801cdce893dd02525c38960d325..1c8dc696d516fc925fa93f1f0d33095bd9316eec 100644 (file)
@@ -49,22 +49,20 @@ int v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses,
 
 void v9fs_cache_inode_get_cookie(struct inode *inode)
 {
-       struct v9fs_inode *v9inode;
+       struct v9fs_inode *v9inode = V9FS_I(inode);
        struct v9fs_session_info *v9ses;
        __le32 version;
        __le64 path;
 
        if (!S_ISREG(inode->i_mode))
                return;
-
-       v9inode = V9FS_I(inode);
-       if (WARN_ON(v9inode->fscache))
+       if (WARN_ON(v9fs_inode_cookie(v9inode)))
                return;
 
        version = cpu_to_le32(v9inode->qid.version);
        path = cpu_to_le64(v9inode->qid.path);
        v9ses = v9fs_inode2v9ses(inode);
-       v9inode->fscache =
+       v9inode->netfs_ctx.cache =
                fscache_acquire_cookie(v9fs_session_cache(v9ses),
                                       0,
                                       &path, sizeof(path),
@@ -72,5 +70,5 @@ void v9fs_cache_inode_get_cookie(struct inode *inode)
                                       i_size_read(&v9inode->vfs_inode));
 
        p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
-                inode, v9inode->fscache);
+                inode, v9fs_inode_cookie(v9inode));
 }
index 08f65c40af4f5d435f95af7d5f8e3b8951ecea74..e28ddf763b3b92e3cfecabffa424a97a0731f3cd 100644 (file)
@@ -623,9 +623,7 @@ static void v9fs_sysfs_cleanup(void)
 static void v9fs_inode_init_once(void *foo)
 {
        struct v9fs_inode *v9inode = (struct v9fs_inode *)foo;
-#ifdef CONFIG_9P_FSCACHE
-       v9inode->fscache = NULL;
-#endif
+
        memset(&v9inode->qid, 0, sizeof(v9inode->qid));
        inode_init_once(&v9inode->vfs_inode);
 }
index bc8b30205d3694cfb84cb83f9fb22651aecf0b72..ec0e8df3b2eb27b1337c5259c635bdf3598205d2 100644 (file)
@@ -9,6 +9,7 @@
 #define FS_9P_V9FS_H
 
 #include <linux/backing-dev.h>
+#include <linux/netfs.h>
 
 /**
  * enum p9_session_flags - option flags for each 9P session
@@ -108,14 +109,15 @@ struct v9fs_session_info {
 #define V9FS_INO_INVALID_ATTR 0x01
 
 struct v9fs_inode {
-#ifdef CONFIG_9P_FSCACHE
-       struct fscache_cookie *fscache;
-#endif
+       struct {
+               /* These must be contiguous */
+               struct inode    vfs_inode;      /* the VFS's inode record */
+               struct netfs_i_context netfs_ctx; /* Netfslib context */
+       };
        struct p9_qid qid;
        unsigned int cache_validity;
        struct p9_fid *writeback_fid;
        struct mutex v_mutex;
-       struct inode vfs_inode;
 };
 
 static inline struct v9fs_inode *V9FS_I(const struct inode *inode)
@@ -126,7 +128,7 @@ static inline struct v9fs_inode *V9FS_I(const struct inode *inode)
 static inline struct fscache_cookie *v9fs_inode_cookie(struct v9fs_inode *v9inode)
 {
 #ifdef CONFIG_9P_FSCACHE
-       return v9inode->fscache;
+       return netfs_i_cookie(&v9inode->vfs_inode);
 #else
        return NULL;
 #endif
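
The accessor works because netfslib, as of this series, locates its per-inode
context purely by layout: the context must sit immediately after the struct
inode, which is what the "These must be contiguous" annotation in struct
v9fs_inode enforces. Paraphrased from the linux/netfs.h of this era:

	static inline struct netfs_i_context *netfs_i_context(struct inode *inode)
	{
		/* The context is laid out directly behind the inode. */
		return (void *)inode + sizeof(*inode);
	}

	static inline struct fscache_cookie *netfs_i_cookie(struct inode *inode)
	{
		return netfs_i_context(inode)->cache;
	}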
@@ -163,6 +165,7 @@ extern struct inode *v9fs_inode_from_fid(struct v9fs_session_info *v9ses,
 extern const struct inode_operations v9fs_dir_inode_operations_dotl;
 extern const struct inode_operations v9fs_file_inode_operations_dotl;
 extern const struct inode_operations v9fs_symlink_inode_operations_dotl;
+extern const struct netfs_request_ops v9fs_req_ops;
 extern struct inode *v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses,
                                              struct p9_fid *fid,
                                              struct super_block *sb, int new);
index 76956c9d2af9e98cec72240244fcdf651d2db6a3..5011281883432dc9111ef498a6d5abc55f14a1e3 100644 (file)
 #include "fid.h"
 
 /**
- * v9fs_req_issue_op - Issue a read from 9P
+ * v9fs_issue_read - Issue a read from 9P
  * @subreq: The read to make
  */
-static void v9fs_req_issue_op(struct netfs_read_subrequest *subreq)
+static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
 {
-       struct netfs_read_request *rreq = subreq->rreq;
+       struct netfs_io_request *rreq = subreq->rreq;
        struct p9_fid *fid = rreq->netfs_priv;
        struct iov_iter to;
        loff_t pos = subreq->start + subreq->transferred;
@@ -52,20 +52,21 @@ static void v9fs_req_issue_op(struct netfs_read_subrequest *subreq)
 }
 
 /**
- * v9fs_init_rreq - Initialise a read request
+ * v9fs_init_request - Initialise a read request
  * @rreq: The read request
  * @file: The file being read from
  */
-static void v9fs_init_rreq(struct netfs_read_request *rreq, struct file *file)
+static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
        struct p9_fid *fid = file->private_data;
 
        refcount_inc(&fid->count);
        rreq->netfs_priv = fid;
+       return 0;
 }
 
 /**
- * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_rreq
+ * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_request
  * @mapping: unused mapping of request to clean up
  * @priv: private data to clean up, a fid, guaranteed non-null.
  */
@@ -76,22 +77,11 @@ static void v9fs_req_cleanup(struct address_space *mapping, void *priv)
        p9_client_clunk(fid);
 }
 
-/**
- * v9fs_is_cache_enabled - Determine if caching is enabled for an inode
- * @inode: The inode to check
- */
-static bool v9fs_is_cache_enabled(struct inode *inode)
-{
-       struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(inode));
-
-       return fscache_cookie_enabled(cookie) && cookie->cache_priv;
-}
-
 /**
  * v9fs_begin_cache_operation - Begin a cache operation for a read
  * @rreq: The read request
  */
-static int v9fs_begin_cache_operation(struct netfs_read_request *rreq)
+static int v9fs_begin_cache_operation(struct netfs_io_request *rreq)
 {
 #ifdef CONFIG_9P_FSCACHE
        struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));
@@ -102,36 +92,13 @@ static int v9fs_begin_cache_operation(struct netfs_read_request *rreq)
 #endif
 }
 
-static const struct netfs_read_request_ops v9fs_req_ops = {
-       .init_rreq              = v9fs_init_rreq,
-       .is_cache_enabled       = v9fs_is_cache_enabled,
+const struct netfs_request_ops v9fs_req_ops = {
+       .init_request           = v9fs_init_request,
        .begin_cache_operation  = v9fs_begin_cache_operation,
-       .issue_op               = v9fs_req_issue_op,
+       .issue_read             = v9fs_issue_read,
        .cleanup                = v9fs_req_cleanup,
 };
 
-/**
- * v9fs_vfs_readpage - read an entire page in from 9P
- * @file: file being read
- * @page: structure to page
- *
- */
-static int v9fs_vfs_readpage(struct file *file, struct page *page)
-{
-       struct folio *folio = page_folio(page);
-
-       return netfs_readpage(file, folio, &v9fs_req_ops, NULL);
-}
-
-/**
- * v9fs_vfs_readahead - read a set of pages from 9P
- * @ractl: The readahead parameters
- */
-static void v9fs_vfs_readahead(struct readahead_control *ractl)
-{
-       netfs_readahead(ractl, &v9fs_req_ops, NULL);
-}
-
 /**
  * v9fs_release_page - release the private state associated with a page
  * @page: The page to be released
@@ -308,8 +275,7 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
         * file.  We need to do this before we get a lock on the page in case
         * there's more than one writer competing for the same cache block.
         */
-       retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata,
-                                  &v9fs_req_ops, NULL);
+       retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata);
        if (retval < 0)
                return retval;
 
@@ -370,8 +336,8 @@ static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
 #endif
 
 const struct address_space_operations v9fs_addr_operations = {
-       .readpage = v9fs_vfs_readpage,
-       .readahead = v9fs_vfs_readahead,
+       .readpage = netfs_readpage,
+       .readahead = netfs_readahead,
        .dirty_folio = v9fs_dirty_folio,
        .writepage = v9fs_vfs_writepage,
        .write_begin = v9fs_write_begin,
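
Wiring the address_space_operations straight at the netfs helpers works
because, by construction, their signatures now match the aops slots, and they
recover the request ops from the inode context set up by
netfs_i_context_init() instead of taking an ops pointer per call:

	int netfs_readpage(struct file *file, struct page *page);
	void netfs_readahead(struct readahead_control *ractl);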
index 84c3cf7dffa5eb58063705745a8dab6074b66e57..55367ecb9442e0129fe2e9b89778b063d48fc9fa 100644 (file)
@@ -231,9 +231,6 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
        v9inode = alloc_inode_sb(sb, v9fs_inode_cache, GFP_KERNEL);
        if (!v9inode)
                return NULL;
-#ifdef CONFIG_9P_FSCACHE
-       v9inode->fscache = NULL;
-#endif
        v9inode->writeback_fid = NULL;
        v9inode->cache_validity = 0;
        mutex_init(&v9inode->v_mutex);
@@ -250,6 +247,14 @@ void v9fs_free_inode(struct inode *inode)
        kmem_cache_free(v9fs_inode_cache, V9FS_I(inode));
 }
 
+/*
+ * Set parameters for the netfs library
+ */
+static void v9fs_set_netfs_context(struct inode *inode)
+{
+       netfs_i_context_init(inode, &v9fs_req_ops);
+}
+
 int v9fs_init_inode(struct v9fs_session_info *v9ses,
                    struct inode *inode, umode_t mode, dev_t rdev)
 {
@@ -338,6 +343,8 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
                err = -EINVAL;
                goto error;
        }
+
+       v9fs_set_netfs_context(inode);
 error:
        return err;
 
index db832cc931c877c08ca0197e714486e3cfc8c83a..f120bcb8bf738d6434179bd0dfa7da1fd4fef243 100644 (file)
@@ -76,6 +76,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
        /* there shouldn't be an existing inode */
        BUG_ON(!(inode->i_state & I_NEW));
 
+       netfs_i_context_init(inode, NULL);
        inode->i_size           = 0;
        inode->i_mode           = S_IFDIR | S_IRUGO | S_IXUGO;
        if (root) {
index 0f9fdb284a2022423fe323d902aff6c0f71be615..26292a110a8f9abf03471a402fdf567a4a6bfcc0 100644 (file)
 #include "internal.h"
 
 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
-static int afs_readpage(struct file *file, struct page *page);
 static int afs_symlink_readpage(struct file *file, struct page *page);
 static void afs_invalidate_folio(struct folio *folio, size_t offset,
                               size_t length);
 static int afs_releasepage(struct page *page, gfp_t gfp_flags);
 
-static void afs_readahead(struct readahead_control *ractl);
 static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
 static void afs_vm_open(struct vm_area_struct *area);
 static void afs_vm_close(struct vm_area_struct *area);
@@ -52,8 +50,8 @@ const struct inode_operations afs_file_inode_operations = {
 };
 
 const struct address_space_operations afs_file_aops = {
-       .readpage       = afs_readpage,
-       .readahead      = afs_readahead,
+       .readpage       = netfs_readpage,
+       .readahead      = netfs_readahead,
        .dirty_folio    = afs_dirty_folio,
        .launder_folio  = afs_launder_folio,
        .releasepage    = afs_releasepage,
@@ -240,7 +238,7 @@ void afs_put_read(struct afs_read *req)
 static void afs_fetch_data_notify(struct afs_operation *op)
 {
        struct afs_read *req = op->fetch.req;
-       struct netfs_read_subrequest *subreq = req->subreq;
+       struct netfs_io_subrequest *subreq = req->subreq;
        int error = op->error;
 
        if (error == -ECONNABORTED)
@@ -310,7 +308,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req)
        return afs_do_sync_operation(op);
 }
 
-static void afs_req_issue_op(struct netfs_read_subrequest *subreq)
+static void afs_issue_read(struct netfs_io_subrequest *subreq)
 {
        struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
        struct afs_read *fsreq;
@@ -359,19 +357,13 @@ static int afs_symlink_readpage(struct file *file, struct page *page)
        return ret;
 }
 
-static void afs_init_rreq(struct netfs_read_request *rreq, struct file *file)
+static int afs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
        rreq->netfs_priv = key_get(afs_file_key(file));
+       return 0;
 }
 
-static bool afs_is_cache_enabled(struct inode *inode)
-{
-       struct fscache_cookie *cookie = afs_vnode_cache(AFS_FS_I(inode));
-
-       return fscache_cookie_enabled(cookie) && cookie->cache_priv;
-}
-
-static int afs_begin_cache_operation(struct netfs_read_request *rreq)
+static int afs_begin_cache_operation(struct netfs_io_request *rreq)
 {
 #ifdef CONFIG_AFS_FSCACHE
        struct afs_vnode *vnode = AFS_FS_I(rreq->inode);
@@ -396,27 +388,14 @@ static void afs_priv_cleanup(struct address_space *mapping, void *netfs_priv)
        key_put(netfs_priv);
 }
 
-const struct netfs_read_request_ops afs_req_ops = {
-       .init_rreq              = afs_init_rreq,
-       .is_cache_enabled       = afs_is_cache_enabled,
+const struct netfs_request_ops afs_req_ops = {
+       .init_request           = afs_init_request,
        .begin_cache_operation  = afs_begin_cache_operation,
        .check_write_begin      = afs_check_write_begin,
-       .issue_op               = afs_req_issue_op,
+       .issue_read             = afs_issue_read,
        .cleanup                = afs_priv_cleanup,
 };
 
-static int afs_readpage(struct file *file, struct page *page)
-{
-       struct folio *folio = page_folio(page);
-
-       return netfs_readpage(file, folio, &afs_req_ops, NULL);
-}
-
-static void afs_readahead(struct readahead_control *ractl)
-{
-       netfs_readahead(ractl, &afs_req_ops, NULL);
-}
-
 int afs_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
        fscache_unpin_writeback(wbc, afs_vnode_cache(AFS_FS_I(inode)));
index 5964f8aee090ffa0900bab7d4c49c81bd3855eb2..2fe402483ad5bbc59750e2096f7127a3ba2152c0 100644 (file)
@@ -53,6 +53,14 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
                dump_stack();
 }
 
+/*
+ * Set parameters for the netfs library
+ */
+static void afs_set_netfs_context(struct afs_vnode *vnode)
+{
+       netfs_i_context_init(&vnode->vfs_inode, &afs_req_ops);
+}
+
 /*
  * Initialise an inode from the vnode status.
  */
@@ -128,6 +136,7 @@ static int afs_inode_init_from_status(struct afs_operation *op,
        }
 
        afs_set_i_size(vnode, status->size);
+       afs_set_netfs_context(vnode);
 
        vnode->invalid_before   = status->data_version;
        inode_set_iversion_raw(&vnode->vfs_inode, status->data_version);
@@ -237,6 +246,7 @@ static void afs_apply_status(struct afs_operation *op,
                 * idea of what the size should be that's not the same as
                 * what's on the server.
                 */
+               vnode->netfs_ctx.remote_i_size = status->size;
                if (change_size) {
                        afs_set_i_size(vnode, status->size);
                        inode->i_ctime = t;
@@ -420,7 +430,7 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
        struct afs_vnode_cache_aux aux;
 
        if (vnode->status.type != AFS_FTYPE_FILE) {
-               vnode->cache = NULL;
+               vnode->netfs_ctx.cache = NULL;
                return;
        }
 
@@ -430,12 +440,14 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
        key.vnode_id_ext[1]     = htonl(vnode->fid.vnode_hi);
        afs_set_cache_aux(vnode, &aux);
 
-       vnode->cache = fscache_acquire_cookie(
-               vnode->volume->cache,
-               vnode->status.type == AFS_FTYPE_FILE ? 0 : FSCACHE_ADV_SINGLE_CHUNK,
-               &key, sizeof(key),
-               &aux, sizeof(aux),
-               vnode->status.size);
+       afs_vnode_set_cache(vnode,
+                           fscache_acquire_cookie(
+                                   vnode->volume->cache,
+                                   vnode->status.type == AFS_FTYPE_FILE ?
+                                   0 : FSCACHE_ADV_SINGLE_CHUNK,
+                                   &key, sizeof(key),
+                                   &aux, sizeof(aux),
+                                   vnode->status.size));
 #endif
 }
 
@@ -528,6 +540,7 @@ struct inode *afs_root_iget(struct super_block *sb, struct key *key)
 
        vnode = AFS_FS_I(inode);
        vnode->cb_v_break = as->volume->cb_v_break,
+       afs_set_netfs_context(vnode);
 
        op = afs_alloc_operation(key, as->volume);
        if (IS_ERR(op)) {
@@ -786,11 +799,8 @@ void afs_evict_inode(struct inode *inode)
                afs_put_wb_key(wbk);
        }
 
-#ifdef CONFIG_AFS_FSCACHE
-       fscache_relinquish_cookie(vnode->cache,
+       fscache_relinquish_cookie(afs_vnode_cache(vnode),
                                  test_bit(AFS_VNODE_DELETED, &vnode->flags));
-       vnode->cache = NULL;
-#endif
 
        afs_prune_wb_keys(vnode);
        afs_put_permits(rcu_access_pointer(vnode->permit_cache));
index dc5032e102447f148029350d4cb9131f7069c35c..7b7ef945dc78e460aa4580b16ab38bcd24cc90eb 100644 (file)
@@ -207,7 +207,7 @@ struct afs_read {
        loff_t                  file_size;      /* File size returned by server */
        struct key              *key;           /* The key to use to reissue the read */
        struct afs_vnode        *vnode;         /* The file being read into. */
-       struct netfs_read_subrequest *subreq;   /* Fscache helper read request this belongs to */
+       struct netfs_io_subrequest *subreq;     /* Fscache helper read request this belongs to */
        afs_dataversion_t       data_version;   /* Version number returned by server */
        refcount_t              usage;
        unsigned int            call_debug_id;
@@ -619,15 +619,16 @@ enum afs_lock_state {
  * leak from one inode to another.
  */
 struct afs_vnode {
-       struct inode            vfs_inode;      /* the VFS's inode record */
+       struct {
+               /* These must be contiguous */
+               struct inode    vfs_inode;      /* the VFS's inode record */
+               struct netfs_i_context netfs_ctx; /* Netfslib context */
+       };
 
        struct afs_volume       *volume;        /* volume on which vnode resides */
        struct afs_fid          fid;            /* the file identifier for this inode */
        struct afs_file_status  status;         /* AFS status info for this file */
        afs_dataversion_t       invalid_before; /* Child dentries are invalid before this */
-#ifdef CONFIG_AFS_FSCACHE
-       struct fscache_cookie   *cache;         /* caching cookie */
-#endif
        struct afs_permits __rcu *permit_cache; /* cache of permits so far obtained */
        struct mutex            io_lock;        /* Lock for serialising I/O on this mutex */
        struct rw_semaphore     validate_lock;  /* lock for validating this vnode */
@@ -674,12 +675,20 @@ struct afs_vnode {
 static inline struct fscache_cookie *afs_vnode_cache(struct afs_vnode *vnode)
 {
 #ifdef CONFIG_AFS_FSCACHE
-       return vnode->cache;
+       return netfs_i_cookie(&vnode->vfs_inode);
 #else
        return NULL;
 #endif
 }
 
+static inline void afs_vnode_set_cache(struct afs_vnode *vnode,
+                                      struct fscache_cookie *cookie)
+{
+#ifdef CONFIG_AFS_FSCACHE
+       vnode->netfs_ctx.cache = cookie;
+#endif
+}
+
 /*
  * cached security record for one user's attempt to access a vnode
  */
@@ -1063,7 +1072,7 @@ extern const struct address_space_operations afs_file_aops;
 extern const struct address_space_operations afs_symlink_aops;
 extern const struct inode_operations afs_file_inode_operations;
 extern const struct file_operations afs_file_operations;
-extern const struct netfs_read_request_ops afs_req_ops;
+extern const struct netfs_request_ops afs_req_ops;
 
 extern int afs_cache_wb_key(struct afs_vnode *, struct afs_file *);
 extern void afs_put_wb_key(struct afs_wb_key *);
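
The reason the new struct afs_vnode (like the analogous ceph and cifs inode structures later in this patch) must keep vfs_inode and netfs_ctx contiguous is that netfslib locates its per-inode context purely by pointer arithmetic from the VFS inode. A minimal sketch of the 5.18-era helpers (the real definitions live in include/linux/netfs.h; exact bodies may differ slightly):

	/* The netfs context is assumed to sit directly after the inode. */
	static inline struct netfs_i_context *netfs_i_context(struct inode *inode)
	{
		return (void *)inode + sizeof(*inode);
	}

	/* One-time per-inode set-up; this replaces passing an ops table to
	 * every netfs_readpage()/netfs_readahead()/netfs_write_begin() call. */
	static inline void netfs_i_context_init(struct inode *inode,
						const struct netfs_request_ops *ops)
	{
		struct netfs_i_context *ctx = netfs_i_context(inode);

		memset(ctx, 0, sizeof(*ctx));
		ctx->ops = ops;
		ctx->remote_i_size = i_size_read(inode);
	}

	/* The fscache cookie now lives in the context as well. */
	static inline struct fscache_cookie *netfs_i_cookie(struct inode *inode)
	{
		return netfs_i_context(inode)->cache;
	}

This is why afs_vnode_cache() above collapses to netfs_i_cookie(&vnode->vfs_inode), and why afs_alloc_inode() below no longer needs its own #ifdef CONFIG_AFS_FSCACHE block.
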
index 7592c0f469f140864b909549fda489bcb638bb17..1fea195b0b2750d7d5282dddff4b80c7b96ef720 100644 (file)
@@ -688,13 +688,11 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
        /* Reset anything that shouldn't leak from one inode to the next. */
        memset(&vnode->fid, 0, sizeof(vnode->fid));
        memset(&vnode->status, 0, sizeof(vnode->status));
+       afs_vnode_set_cache(vnode, NULL);
 
        vnode->volume           = NULL;
        vnode->lock_key         = NULL;
        vnode->permit_cache     = NULL;
-#ifdef CONFIG_AFS_FSCACHE
-       vnode->cache            = NULL;
-#endif
 
        vnode->flags            = 1 << AFS_VNODE_UNSET;
        vnode->lock_state       = AFS_VNODE_LOCK_NONE;
index e1c17081d18e3493d06a6972d1878f3057200684..4763132ca57e7f53d4ca82ab64282fe8a3848f11 100644 (file)
@@ -60,8 +60,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
         * file.  We need to do this before we get a lock on the page in case
         * there's more than one writer competing for the same cache block.
         */
-       ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata,
-                               &afs_req_ops, NULL);
+       ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata);
        if (ret < 0)
                return ret;
 
@@ -355,9 +354,10 @@ static const struct afs_operation_ops afs_store_data_operation = {
 static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
                          bool laundering)
 {
+       struct netfs_i_context *ictx = &vnode->netfs_ctx;
        struct afs_operation *op;
        struct afs_wb_key *wbk = NULL;
-       loff_t size = iov_iter_count(iter), i_size;
+       loff_t size = iov_iter_count(iter);
        int ret = -ENOKEY;
 
        _enter("%s{%llx:%llu.%u},%llx,%llx",
@@ -379,15 +379,13 @@ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t
                return -ENOMEM;
        }
 
-       i_size = i_size_read(&vnode->vfs_inode);
-
        afs_op_set_vnode(op, 0, vnode);
        op->file[0].dv_delta = 1;
        op->file[0].modification = true;
        op->store.write_iter = iter;
        op->store.pos = pos;
        op->store.size = size;
-       op->store.i_size = max(pos + size, i_size);
+       op->store.i_size = max(pos + size, ictx->remote_i_size);
        op->store.laundering = laundering;
        op->mtime = vnode->vfs_inode.i_mtime;
        op->flags |= AFS_OPERATION_UNINTR;
@@ -618,8 +616,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
                _debug("write discard %x @%llx [%llx]", len, start, i_size);
 
                /* The dirty region was entirely beyond the EOF. */
-               fscache_clear_page_bits(afs_vnode_cache(vnode),
-                                       mapping, start, len, caching);
+               fscache_clear_page_bits(mapping, start, len, caching);
                afs_pages_written_back(vnode, start, len);
                ret = 0;
        }
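
One subtlety in the afs_store_data() change above: op->store.i_size is now clamped against ictx->remote_i_size instead of i_size_read(). The local i_size may already cover buffered writes that have not reached the server, so using it could report a file size the server has no data for; remote_i_size (kept up to date from status->size in afs_apply_status() earlier) tracks what the server has actually acknowledged. Schematically, under that reading of the two fields:

	loff_t local  = i_size_read(&vnode->vfs_inode);  /* may include unflushed data */
	loff_t remote = vnode->netfs_ctx.remote_i_size;  /* last size the server reported */

	/* The size sent with this store covers only data the server already
	 * has, plus the range being written right now (note: not "local"): */
	op->store.i_size = max(pos + size, remote);
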
index 7b66b93d5bfa7a3e0e750c7eed1fd890116ca2d5..3c249b93863274ccb6b1d10c05fa901861e5c848 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1552,7 +1552,6 @@ static int aio_read(struct kiocb *req, const struct iocb *iocb,
        file = req->ki_filp;
        if (unlikely(!(file->f_mode & FMODE_READ)))
                return -EBADF;
-       ret = -EINVAL;
        if (unlikely(!file->f_op->read_iter))
                return -EINVAL;
 
index 6556e13ed95f0dcc6634708c722e5feaa79853d5..63c7ebb0da8987a5ca86db763d9e48a8ea7365e9 100644 (file)
@@ -1117,11 +1117,11 @@ out_free_interp:
                         * independently randomized mmap region (0 load_bias
                         * without MAP_FIXED nor MAP_FIXED_NOREPLACE).
                         */
-                       alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
-                       if (interpreter || alignment > ELF_MIN_ALIGN) {
+                       if (interpreter) {
                                load_bias = ELF_ET_DYN_BASE;
                                if (current->flags & PF_RANDOMIZE)
                                        load_bias += arch_mmap_rnd();
+                               alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
                                if (alignment)
                                        load_bias &= ~(alignment - 1);
                                elf_flags |= MAP_FIXED_NOREPLACE;
index c22d287e020b3d4f2a8afcffb6b9f4b84bae82a9..0dd6de994199947850d6b06bec5cf93bfc0094a5 100644 (file)
@@ -2503,12 +2503,6 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
                return ERR_PTR(ret);
        }
 
-       /*
-        * New block group is likely to be used soon. Try to activate it now.
-        * Failure is OK for now.
-        */
-       btrfs_zone_activate(cache);
-
        ret = exclude_super_stripes(cache);
        if (ret) {
                /* We may have excluded something, so call this just in case */
@@ -2946,7 +2940,6 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
        struct btrfs_path *path = NULL;
        LIST_HEAD(dirty);
        struct list_head *io = &cur_trans->io_bgs;
-       int num_started = 0;
        int loops = 0;
 
        spin_lock(&cur_trans->dirty_bgs_lock);
@@ -3012,7 +3005,6 @@ again:
                        cache->io_ctl.inode = NULL;
                        ret = btrfs_write_out_cache(trans, cache, path);
                        if (ret == 0 && cache->io_ctl.inode) {
-                               num_started++;
                                should_put = 0;
 
                                /*
@@ -3113,7 +3105,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
        int should_put;
        struct btrfs_path *path;
        struct list_head *io = &cur_trans->io_bgs;
-       int num_started = 0;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -3171,7 +3162,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
                        cache->io_ctl.inode = NULL;
                        ret = btrfs_write_out_cache(trans, cache, path);
                        if (ret == 0 && cache->io_ctl.inode) {
-                               num_started++;
                                should_put = 0;
                                list_add_tail(&cache->io_list, io);
                        } else {
@@ -3455,7 +3445,7 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
        return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
 }
 
-static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
+static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
 {
        struct btrfs_block_group *bg;
        int ret;
@@ -3542,7 +3532,11 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
 out:
        btrfs_trans_release_chunk_metadata(trans);
 
-       return ret;
+       if (ret)
+               return ERR_PTR(ret);
+
+       btrfs_get_block_group(bg);
+       return bg;
 }
 
 /*
@@ -3657,10 +3651,17 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_space_info *space_info;
+       struct btrfs_block_group *ret_bg;
        bool wait_for_alloc = false;
        bool should_alloc = false;
+       bool from_extent_allocation = false;
        int ret = 0;
 
+       if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) {
+               from_extent_allocation = true;
+               force = CHUNK_ALLOC_FORCE;
+       }
+
        /* Don't re-enter if we're already allocating a chunk */
        if (trans->allocating_chunk)
                return -ENOSPC;
@@ -3750,9 +3751,22 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
                        force_metadata_allocation(fs_info);
        }
 
-       ret = do_chunk_alloc(trans, flags);
+       ret_bg = do_chunk_alloc(trans, flags);
        trans->allocating_chunk = false;
 
+       if (IS_ERR(ret_bg)) {
+               ret = PTR_ERR(ret_bg);
+       } else if (from_extent_allocation) {
+               /*
+                * New block group is likely to be used soon. Try to activate
+                * it now. Failure is OK for now.
+                */
+               btrfs_zone_activate(ret_bg);
+       }
+
+       if (!ret)
+               btrfs_put_block_group(ret_bg);
+
        spin_lock(&space_info->lock);
        if (ret < 0) {
                if (ret == -ENOSPC)
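
The net effect of the refactor above is that do_chunk_alloc() now returns the newly created block group, with a reference held, instead of a bare errno, which lets btrfs_chunk_alloc() activate the zone only for allocations requested by find_free_extent(). A sketch of the resulting caller contract, assuming the reference rules shown in the hunks above:

	struct btrfs_block_group *bg;
	int ret = 0;

	bg = do_chunk_alloc(trans, flags);
	if (IS_ERR(bg)) {
		ret = PTR_ERR(bg);		/* no reference is held on failure */
	} else {
		if (from_extent_allocation)
			btrfs_zone_activate(bg);	/* failure tolerated for now */
		btrfs_put_block_group(bg);	/* drop do_chunk_alloc()'s reference */
	}
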
index 93aabc68bb6a8e9c104d72474b12adaa9d08a546..e8308f2ad07d1988e408f0082fc34291fc9c429c 100644 (file)
@@ -35,11 +35,15 @@ enum btrfs_discard_state {
  * the FS with empty chunks
  *
  * CHUNK_ALLOC_FORCE means it must try to allocate one
+ *
+ * CHUNK_ALLOC_FORCE_FOR_EXTENT is like CHUNK_ALLOC_FORCE but is called from
+ * find_free_extent(), which also activates the zone
  */
 enum btrfs_chunk_alloc_enum {
        CHUNK_ALLOC_NO_FORCE,
        CHUNK_ALLOC_LIMITED,
        CHUNK_ALLOC_FORCE,
+       CHUNK_ALLOC_FORCE_FOR_EXTENT,
 };
 
 struct btrfs_caching_control {
index be476f094300ac8e53e3309963cd469664a6b31e..19bf36d8ffea7286dbee1388149d2bbeeb3b7011 100644 (file)
@@ -537,6 +537,9 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
        cb->orig_bio = NULL;
        cb->nr_pages = nr_pages;
 
+       if (blkcg_css)
+               kthread_associate_blkcg(blkcg_css);
+
        while (cur_disk_bytenr < disk_start + compressed_len) {
                u64 offset = cur_disk_bytenr - disk_start;
                unsigned int index = offset >> PAGE_SHIFT;
@@ -555,6 +558,8 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
                                bio = NULL;
                                goto finish_cb;
                        }
+                       if (blkcg_css)
+                               bio->bi_opf |= REQ_CGROUP_PUNT;
                }
                /*
                 * We should never reach next_stripe_start start as we will
@@ -612,6 +617,9 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
        return 0;
 
 finish_cb:
+       if (blkcg_css)
+               kthread_associate_blkcg(NULL);
+
        if (bio) {
                bio->bi_status = ret;
                bio_endio(bio);
index b30309f187cf022c3910b83d92a26b2660875364..126f244cdf883cdc6c764a30f55aaa1da00982f3 100644 (file)
@@ -1850,9 +1850,10 @@ again:
 
        ret = btrfs_insert_fs_root(fs_info, root);
        if (ret) {
-               btrfs_put_root(root);
-               if (ret == -EEXIST)
+               if (ret == -EEXIST) {
+                       btrfs_put_root(root);
                        goto again;
+               }
                goto fail;
        }
        return root;
index f477035a2ac2358015bcdff41f733906d04fe872..6aa92f84f46547f5144164778926ce420701772e 100644 (file)
@@ -4082,7 +4082,7 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
                        }
 
                        ret = btrfs_chunk_alloc(trans, ffe_ctl->flags,
-                                               CHUNK_ALLOC_FORCE);
+                                               CHUNK_ALLOC_FORCE_FOR_EXTENT);
 
                        /* Do not bail out on ENOSPC since we can do more. */
                        if (ret == -ENOSPC)
index 0399cf8e3c32c5c3a5e5141bd07cea93e2abcaa6..151e9da5da2dc2d553fb4f0582ad882133c916e9 100644 (file)
@@ -118,7 +118,7 @@ struct btrfs_bio_ctrl {
  */
 struct extent_changeset {
        /* How many bytes are set/cleared in this operation */
-       unsigned int bytes_changed;
+       u64 bytes_changed;
 
        /* Changed ranges */
        struct ulist range_changed;
index 9f455c96c9744b5f9ed3af1599eb4b375b072907..380054c94e4b6a1cb2502f06998acfc015806db3 100644 (file)
@@ -2957,8 +2957,9 @@ out:
        return ret;
 }
 
-static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
 {
+       struct inode *inode = file_inode(file);
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_state *cached_state = NULL;
@@ -2990,6 +2991,10 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
                goto out_only_mutex;
        }
 
+       ret = file_modified(file);
+       if (ret)
+               goto out_only_mutex;
+
        lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode)));
        lockend = round_down(offset + len,
                             btrfs_inode_sectorsize(BTRFS_I(inode))) - 1;
@@ -3430,7 +3435,7 @@ static long btrfs_fallocate(struct file *file, int mode,
                return -EOPNOTSUPP;
 
        if (mode & FALLOC_FL_PUNCH_HOLE)
-               return btrfs_punch_hole(inode, offset, len);
+               return btrfs_punch_hole(file, offset, len);
 
        /*
         * Only trigger disk allocation, don't trigger qgroup reserve
@@ -3452,6 +3457,10 @@ static long btrfs_fallocate(struct file *file, int mode,
                        goto out;
        }
 
+       ret = file_modified(file);
+       if (ret)
+               goto out;
+
        /*
         * TODO: Move these two operations after we have checked
         * accurate reserved space, or fallocate can still fail but
index aa0a60ee26cb742b31e1ca0985a4d8964befb7cd..5082b9c70f8c9dc57340e7825f7e1437e9ee006d 100644 (file)
@@ -1128,7 +1128,6 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
        int ret = 0;
 
        if (btrfs_is_free_space_inode(inode)) {
-               WARN_ON_ONCE(1);
                ret = -EINVAL;
                goto out_unlock;
        }
@@ -2017,8 +2016,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
                 * to use run_delalloc_nocow() here, like for  regular
                 * preallocated inodes.
                 */
-               ASSERT(!zoned ||
-                      (zoned && btrfs_is_data_reloc_root(inode->root)));
+               ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root));
                ret = run_delalloc_nocow(inode, locked_page, start, end,
                                         page_started, nr_written);
        } else if (!inode_can_compress(inode) ||
@@ -4488,6 +4486,13 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
                           dest->root_key.objectid);
                return -EPERM;
        }
+       if (atomic_read(&dest->nr_swapfiles)) {
+               spin_unlock(&dest->root_item_lock);
+               btrfs_warn(fs_info,
+                          "attempt to delete subvolume %llu with active swapfile",
+                          root->root_key.objectid);
+               return -EPERM;
+       }
        root_flags = btrfs_root_flags(&dest->root_item);
        btrfs_set_root_flags(&dest->root_item,
                             root_flags | BTRFS_ROOT_SUBVOL_DEAD);
@@ -7438,6 +7443,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
        u64 block_start, orig_start, orig_block_len, ram_bytes;
        bool can_nocow = false;
        bool space_reserved = false;
+       u64 prev_len;
        int ret = 0;
 
        /*
@@ -7465,6 +7471,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
                        can_nocow = true;
        }
 
+       prev_len = len;
        if (can_nocow) {
                struct extent_map *em2;
 
@@ -7494,8 +7501,6 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
                        goto out;
                }
        } else {
-               const u64 prev_len = len;
-
                /* Our caller expects us to free the input extent map. */
                free_extent_map(em);
                *map = NULL;
@@ -7526,7 +7531,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
         * We have created our ordered extent, so we can now release our reservation
         * for an outstanding extent.
         */
-       btrfs_delalloc_release_extents(BTRFS_I(inode), len);
+       btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);
 
        /*
         * Need to update the i_size under the extent lock so buffered
@@ -8296,7 +8301,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
         * cover the full folio, like invalidating the last folio, we're
         * still safe to wait for ordered extent to finish.
         */
-       if (!(offset == 0 && length == PAGE_SIZE)) {
+       if (!(offset == 0 && length == folio_size(folio))) {
                btrfs_releasepage(&folio->page, GFP_NOFS);
                return;
        }
@@ -11107,8 +11112,23 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
         * set. We use this counter to prevent snapshots. We must increment it
         * before walking the extents because we don't want a concurrent
         * snapshot to run after we've already checked the extents.
+        *
+        * It is possible that the subvolume has been marked for deletion but
+        * not yet removed. To prevent this race, we check the root status
+        * before activating the swapfile.
         */
+       spin_lock(&root->root_item_lock);
+       if (btrfs_root_dead(root)) {
+               spin_unlock(&root->root_item_lock);
+
+               btrfs_exclop_finish(fs_info);
+               btrfs_warn(fs_info,
+               "cannot activate swapfile because subvolume %llu is being deleted",
+                       root->root_key.objectid);
+               return -EPERM;
+       }
        atomic_inc(&root->nr_swapfiles);
+       spin_unlock(&root->root_item_lock);
 
        isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
 
index 238cee5b5254d1c7204e7d7a4286a6aec6ac09c8..be6c24577dbe0672589a8a0eccf3b62b2f0ec0df 100644 (file)
@@ -1239,7 +1239,7 @@ static u32 get_extent_max_capacity(const struct extent_map *em)
 }
 
 static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
-                                    bool locked)
+                                    u32 extent_thresh, u64 newer_than, bool locked)
 {
        struct extent_map *next;
        bool ret = false;
@@ -1249,11 +1249,12 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
                return false;
 
        /*
-        * We want to check if the next extent can be merged with the current
-        * one, which can be an extent created in a past generation, so we pass
-        * a minimum generation of 0 to defrag_lookup_extent().
+        * Here we need to pass @newer_than when checking the next extent, or
+        * we will hit a case where we mark the current extent for defrag but
+        * the next one is not a target.
+        * That would just cause extra IO without really reducing the fragments.
         */
-       next = defrag_lookup_extent(inode, em->start + em->len, 0, locked);
+       next = defrag_lookup_extent(inode, em->start + em->len, newer_than, locked);
        /* No more em or hole */
        if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
                goto out;
@@ -1265,6 +1266,13 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
         */
        if (next->len >= get_extent_max_capacity(em))
                goto out;
+       /* Skip older extent */
+       if (next->generation < newer_than)
+               goto out;
+       /* Also check extent size */
+       if (next->len >= extent_thresh)
+               goto out;
+
        ret = true;
 out:
        free_extent_map(next);
@@ -1470,7 +1478,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
                        goto next;
 
                next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
-                                                         locked);
+                                               extent_thresh, newer_than, locked);
                if (!next_mergeable) {
                        struct defrag_target_range *last;
 
@@ -5448,8 +5456,6 @@ long btrfs_ioctl(struct file *file, unsigned int
                return btrfs_ioctl_fs_info(fs_info, argp);
        case BTRFS_IOC_DEV_INFO:
                return btrfs_ioctl_dev_info(fs_info, argp);
-       case BTRFS_IOC_BALANCE:
-               return btrfs_ioctl_balance(file, NULL);
        case BTRFS_IOC_TREE_SEARCH:
                return btrfs_ioctl_tree_search(inode, argp);
        case BTRFS_IOC_TREE_SEARCH_V2:
index 04a88bfe4fcf93ec35325a08e01537609162cf71..998e3f180d90e060fe2011d0bb92f2f0523d5d20 100644 (file)
@@ -645,7 +645,7 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
        int ret;
 
        /*
-        * Lock destination range to serialize with concurrent readpages() and
+        * Lock destination range to serialize with concurrent readahead() and
         * source range to serialize with relocation.
         */
        btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
@@ -739,7 +739,7 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
        }
 
        /*
-        * Lock destination range to serialize with concurrent readpages() and
+        * Lock destination range to serialize with concurrent readahead() and
         * source range to serialize with relocation.
         */
        btrfs_double_extent_lock(src, off, inode, destoff, len);
index 1be7cb2f955fcbf2a9fce535b1b4d49e5c4690e9..a8cc736731fdbed9e0b7539d781dacae25b29632 100644 (file)
@@ -1896,23 +1896,18 @@ static void update_dev_time(const char *device_path)
        path_put(&path);
 }
 
-static int btrfs_rm_dev_item(struct btrfs_device *device)
+static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
+                            struct btrfs_device *device)
 {
        struct btrfs_root *root = device->fs_info->chunk_root;
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;
-       struct btrfs_trans_handle *trans;
 
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
 
-       trans = btrfs_start_transaction(root, 0);
-       if (IS_ERR(trans)) {
-               btrfs_free_path(path);
-               return PTR_ERR(trans);
-       }
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;
@@ -1923,21 +1918,12 @@ static int btrfs_rm_dev_item(struct btrfs_device *device)
        if (ret) {
                if (ret > 0)
                        ret = -ENOENT;
-               btrfs_abort_transaction(trans, ret);
-               btrfs_end_transaction(trans);
                goto out;
        }
 
        ret = btrfs_del_item(trans, root, path);
-       if (ret) {
-               btrfs_abort_transaction(trans, ret);
-               btrfs_end_transaction(trans);
-       }
-
 out:
        btrfs_free_path(path);
-       if (!ret)
-               ret = btrfs_commit_transaction(trans);
        return ret;
 }
 
@@ -2078,6 +2064,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
                    struct btrfs_dev_lookup_args *args,
                    struct block_device **bdev, fmode_t *mode)
 {
+       struct btrfs_trans_handle *trans;
        struct btrfs_device *device;
        struct btrfs_fs_devices *cur_devices;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
@@ -2098,7 +2085,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
 
        ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
        if (ret)
-               goto out;
+               return ret;
 
        device = btrfs_find_device(fs_info->fs_devices, args);
        if (!device) {
@@ -2106,27 +2093,22 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
                        ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
                else
                        ret = -ENOENT;
-               goto out;
+               return ret;
        }
 
        if (btrfs_pinned_by_swapfile(fs_info, device)) {
                btrfs_warn_in_rcu(fs_info,
                  "cannot remove device %s (devid %llu) due to active swapfile",
                                  rcu_str_deref(device->name), device->devid);
-               ret = -ETXTBSY;
-               goto out;
+               return -ETXTBSY;
        }
 
-       if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
-               ret = BTRFS_ERROR_DEV_TGT_REPLACE;
-               goto out;
-       }
+       if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
+               return BTRFS_ERROR_DEV_TGT_REPLACE;
 
        if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
-           fs_info->fs_devices->rw_devices == 1) {
-               ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
-               goto out;
-       }
+           fs_info->fs_devices->rw_devices == 1)
+               return BTRFS_ERROR_DEV_ONLY_WRITABLE;
 
        if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
                mutex_lock(&fs_info->chunk_mutex);
@@ -2139,14 +2121,22 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
        if (ret)
                goto error_undo;
 
-       /*
-        * TODO: the superblock still includes this device in its num_devices
-        * counter although write_all_supers() is not locked out. This
-        * could give a filesystem state which requires a degraded mount.
-        */
-       ret = btrfs_rm_dev_item(device);
-       if (ret)
+       trans = btrfs_start_transaction(fs_info->chunk_root, 0);
+       if (IS_ERR(trans)) {
+               ret = PTR_ERR(trans);
                goto error_undo;
+       }
+
+       ret = btrfs_rm_dev_item(trans, device);
+       if (ret) {
+               /* Any error in dev item removal is critical */
+               btrfs_crit(fs_info,
+                          "failed to remove device item for devid %llu: %d",
+                          device->devid, ret);
+               btrfs_abort_transaction(trans, ret);
+               btrfs_end_transaction(trans);
+               return ret;
+       }
 
        clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
        btrfs_scrub_cancel_dev(device);
@@ -2229,7 +2219,8 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
                free_fs_devices(cur_devices);
        }
 
-out:
+       ret = btrfs_commit_transaction(trans);
+
        return ret;
 
 error_undo:
@@ -2240,7 +2231,7 @@ error_undo:
                device->fs_devices->rw_devices++;
                mutex_unlock(&fs_info->chunk_mutex);
        }
-       goto out;
+       return ret;
 }
 
 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
@@ -4439,10 +4430,12 @@ static int balance_kthread(void *data)
        struct btrfs_fs_info *fs_info = data;
        int ret = 0;
 
+       sb_start_write(fs_info->sb);
        mutex_lock(&fs_info->balance_mutex);
        if (fs_info->balance_ctl)
                ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
        mutex_unlock(&fs_info->balance_mutex);
+       sb_end_write(fs_info->sb);
 
        return ret;
 }
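
Two independent fixes land in this file. First, hoisting the transaction out of btrfs_rm_dev_item() lets btrfs_rm_device() commit the dev-item deletion together with the rest of the device removal, closing the window the deleted TODO described (the superblock's num_devices being written while disagreeing with the dev tree). The resulting shape, roughly:

	trans = btrfs_start_transaction(fs_info->chunk_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error_undo;
	}

	ret = btrfs_rm_dev_item(trans, device);	/* no longer commits on its own */
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	/* ... tear down in-memory and on-disk state ... */

	ret = btrfs_commit_transaction(trans);	/* a single commit, at the end */

Second, balance_kthread() now takes sb_start_write()/sb_end_write() around btrfs_balance(): the kthread resumes a balance asynchronously, so no ioctl caller is holding freeze protection on its behalf.
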
index b7b5fac1c779004088ecf29fb4ec69bdd940e213..1b1b310c3c510c60202e792791501272375a90d3 100644 (file)
@@ -1801,7 +1801,6 @@ struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
 
        map = em->map_lookup;
        /* We only support single profile for now */
-       ASSERT(map->num_stripes == 1);
        device = map->stripes[0].dev;
 
        free_extent_map(em);
@@ -1976,18 +1975,16 @@ int btrfs_zone_finish(struct btrfs_block_group *block_group)
 
 bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
 {
+       struct btrfs_fs_info *fs_info = fs_devices->fs_info;
        struct btrfs_device *device;
        bool ret = false;
 
-       if (!btrfs_is_zoned(fs_devices->fs_info))
+       if (!btrfs_is_zoned(fs_info))
                return true;
 
-       /* Non-single profiles are not supported yet */
-       ASSERT((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0);
-
        /* Check if there is a device with active zones left */
-       mutex_lock(&fs_devices->device_list_mutex);
-       list_for_each_entry(device, &fs_devices->devices, dev_list) {
+       mutex_lock(&fs_info->chunk_mutex);
+       list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
                struct btrfs_zoned_device_info *zinfo = device->zone_info;
 
                if (!device->bdev)
@@ -1999,7 +1996,7 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
                        break;
                }
        }
-       mutex_unlock(&fs_devices->device_list_mutex);
+       mutex_unlock(&fs_info->chunk_mutex);
 
        return ret;
 }
index d67fbe063a3a95d084405cec30cc5a3b1b48df33..2b5561ae5d0b32b038131a77db6073a0eccddb7f 100644 (file)
@@ -2352,8 +2352,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
        if (err)
                goto out;
 
-       err = pagecache_write_begin(NULL, mapping, size, 0,
-                                   AOP_FLAG_CONT_EXPAND, &page, &fsdata);
+       err = pagecache_write_begin(NULL, mapping, size, 0, 0, &page, &fsdata);
        if (err)
                goto out;
 
index bc7c7a7d9260090c525271ae89348d3572135df9..9dc81e781f2b3b566181a66b70c430eeb52daf43 100644 (file)
@@ -380,18 +380,18 @@ presubmission_error:
  * Prepare a read operation, shortening it to a cached/uncached
  * boundary as appropriate.
  */
-static enum netfs_read_source cachefiles_prepare_read(struct netfs_read_subrequest *subreq,
+static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
                                                      loff_t i_size)
 {
        enum cachefiles_prepare_read_trace why;
-       struct netfs_read_request *rreq = subreq->rreq;
+       struct netfs_io_request *rreq = subreq->rreq;
        struct netfs_cache_resources *cres = &rreq->cache_resources;
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct fscache_cookie *cookie = fscache_cres_cookie(cres);
        const struct cred *saved_cred;
        struct file *file = cachefiles_cres_file(cres);
-       enum netfs_read_source ret = NETFS_DOWNLOAD_FROM_SERVER;
+       enum netfs_io_source ret = NETFS_DOWNLOAD_FROM_SERVER;
        loff_t off, to;
        ino_t ino = file ? file_inode(file)->i_ino : 0;
 
@@ -404,7 +404,7 @@ static enum netfs_read_source cachefiles_prepare_read(struct netfs_read_subreque
        }
 
        if (test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags)) {
-               __set_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
+               __set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
                why = cachefiles_trace_read_no_data;
                goto out_no_object;
        }
@@ -473,7 +473,7 @@ static enum netfs_read_source cachefiles_prepare_read(struct netfs_read_subreque
        goto out;
 
 download_and_store:
-       __set_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
+       __set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
 out:
        cachefiles_end_secure(cache, saved_cred);
 out_no_object:
index f256c8aff7bb5fa7db56210c7961ce56aaedd6d9..ca9f3e4ec4b3fb2fcb50e57d613c29333d04aac1 100644 (file)
@@ -57,6 +57,16 @@ static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
        trace_cachefiles_mark_inactive(object, inode);
 }
 
+static void cachefiles_do_unmark_inode_in_use(struct cachefiles_object *object,
+                                             struct dentry *dentry)
+{
+       struct inode *inode = d_backing_inode(dentry);
+
+       inode_lock(inode);
+       __cachefiles_unmark_inode_in_use(object, dentry);
+       inode_unlock(inode);
+}
+
 /*
  * Unmark a backing inode and tell cachefilesd that there's something that can
  * be culled.
@@ -68,9 +78,7 @@ void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
        struct inode *inode = file_inode(file);
 
        if (inode) {
-               inode_lock(inode);
-               __cachefiles_unmark_inode_in_use(object, file->f_path.dentry);
-               inode_unlock(inode);
+               cachefiles_do_unmark_inode_in_use(object, file->f_path.dentry);
 
                if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
                        atomic_long_add(inode->i_blocks, &cache->b_released);
@@ -484,7 +492,7 @@ struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
                                object, d_backing_inode(path.dentry), ret,
                                cachefiles_trace_trunc_error);
                        file = ERR_PTR(ret);
-                       goto out_dput;
+                       goto out_unuse;
                }
        }
 
@@ -494,15 +502,20 @@ struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
                trace_cachefiles_vfs_error(object, d_backing_inode(path.dentry),
                                           PTR_ERR(file),
                                           cachefiles_trace_open_error);
-               goto out_dput;
+               goto out_unuse;
        }
        if (unlikely(!file->f_op->read_iter) ||
            unlikely(!file->f_op->write_iter)) {
                fput(file);
                pr_notice("Cache does not support read_iter and write_iter\n");
                file = ERR_PTR(-EINVAL);
+               goto out_unuse;
        }
 
+       goto out_dput;
+
+out_unuse:
+       cachefiles_do_unmark_inode_in_use(object, path.dentry);
 out_dput:
        dput(path.dentry);
 out:
@@ -590,14 +603,16 @@ static bool cachefiles_open_file(struct cachefiles_object *object,
 check_failed:
        fscache_cookie_lookup_negative(object->cookie);
        cachefiles_unmark_inode_in_use(object, file);
-       if (ret == -ESTALE) {
-               fput(file);
-               dput(dentry);
+       fput(file);
+       dput(dentry);
+       if (ret == -ESTALE)
                return cachefiles_create_file(object);
-       }
+       return false;
+
 error_fput:
        fput(file);
 error:
+       cachefiles_do_unmark_inode_in_use(object, dentry);
        dput(dentry);
        return false;
 }
index 35465109d9c4ea70fdab56993c2339af2532b3d6..00b087c14995a0be7d407efa1f40f620a1acb7f3 100644 (file)
@@ -203,7 +203,7 @@ bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
        if (!buf)
                return false;
        buf->reserved = cpu_to_be32(0);
-       memcpy(buf->data, p, len);
+       memcpy(buf->data, p, volume->vcookie->coherency_len);
 
        ret = cachefiles_inject_write_error();
        if (ret == 0)
index c7a0ab0d298ba68917a9e95c953260d6f269f798..aa25bffd48237afb3a0140f68e9824396be24c7b 100644 (file)
@@ -182,7 +182,7 @@ static int ceph_releasepage(struct page *page, gfp_t gfp)
        return 1;
 }
 
-static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq)
+static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
 {
        struct inode *inode = rreq->inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
@@ -199,7 +199,7 @@ static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq)
        rreq->len = roundup(rreq->len, lo->stripe_unit);
 }
 
-static bool ceph_netfs_clamp_length(struct netfs_read_subrequest *subreq)
+static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
 {
        struct inode *inode = subreq->rreq->inode;
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
@@ -218,7 +218,7 @@ static void finish_netfs_read(struct ceph_osd_request *req)
 {
        struct ceph_fs_client *fsc = ceph_inode_to_client(req->r_inode);
        struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
-       struct netfs_read_subrequest *subreq = req->r_priv;
+       struct netfs_io_subrequest *subreq = req->r_priv;
        int num_pages;
        int err = req->r_result;
 
@@ -244,9 +244,9 @@ static void finish_netfs_read(struct ceph_osd_request *req)
        iput(req->r_inode);
 }
 
-static bool ceph_netfs_issue_op_inline(struct netfs_read_subrequest *subreq)
+static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
 {
-       struct netfs_read_request *rreq = subreq->rreq;
+       struct netfs_io_request *rreq = subreq->rreq;
        struct inode *inode = rreq->inode;
        struct ceph_mds_reply_info_parsed *rinfo;
        struct ceph_mds_reply_info_in *iinfo;
@@ -258,7 +258,7 @@ static bool ceph_netfs_issue_op_inline(struct netfs_read_subrequest *subreq)
        size_t len;
 
        __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
-       __clear_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
+       __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
 
        if (subreq->start >= inode->i_size)
                goto out;
@@ -297,9 +297,9 @@ out:
        return true;
 }
 
-static void ceph_netfs_issue_op(struct netfs_read_subrequest *subreq)
+static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
 {
-       struct netfs_read_request *rreq = subreq->rreq;
+       struct netfs_io_request *rreq = subreq->rreq;
        struct inode *inode = rreq->inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
@@ -353,6 +353,45 @@ out:
        dout("%s: result %d\n", __func__, err);
 }
 
+static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
+{
+       struct inode *inode = rreq->inode;
+       int got = 0, want = CEPH_CAP_FILE_CACHE;
+       int ret = 0;
+
+       if (rreq->origin != NETFS_READAHEAD)
+               return 0;
+
+       if (file) {
+               struct ceph_rw_context *rw_ctx;
+               struct ceph_file_info *fi = file->private_data;
+
+               rw_ctx = ceph_find_rw_context(fi);
+               if (rw_ctx)
+                       return 0;
+       }
+
+       /*
+        * readahead callers do not necessarily hold Fcb caps
+        * (e.g. fadvise, madvise).
+        */
+       ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
+       if (ret < 0) {
+               dout("start_read %p, error getting cap\n", inode);
+               return ret;
+       }
+
+       if (!(got & want)) {
+               dout("start_read %p, no cache cap\n", inode);
+               return -EACCES;
+       }
+       if (ret == 0)
+               return -EACCES;
+
+       rreq->netfs_priv = (void *)(uintptr_t)got;
+       return 0;
+}
+
 static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
 {
        struct inode *inode = mapping->host;
@@ -363,64 +402,16 @@ static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
                ceph_put_cap_refs(ci, got);
 }
 
-static const struct netfs_read_request_ops ceph_netfs_read_ops = {
-       .is_cache_enabled       = ceph_is_cache_enabled,
+const struct netfs_request_ops ceph_netfs_ops = {
+       .init_request           = ceph_init_request,
        .begin_cache_operation  = ceph_begin_cache_operation,
-       .issue_op               = ceph_netfs_issue_op,
+       .issue_read             = ceph_netfs_issue_read,
        .expand_readahead       = ceph_netfs_expand_readahead,
        .clamp_length           = ceph_netfs_clamp_length,
        .check_write_begin      = ceph_netfs_check_write_begin,
        .cleanup                = ceph_readahead_cleanup,
 };
 
-/* read a single page, without unlocking it. */
-static int ceph_readpage(struct file *file, struct page *subpage)
-{
-       struct folio *folio = page_folio(subpage);
-       struct inode *inode = file_inode(file);
-       struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_vino vino = ceph_vino(inode);
-       size_t len = folio_size(folio);
-       u64 off = folio_file_pos(folio);
-
-       dout("readpage ino %llx.%llx file %p off %llu len %zu folio %p index %lu\n inline %d",
-            vino.ino, vino.snap, file, off, len, folio, folio_index(folio),
-            ci->i_inline_version != CEPH_INLINE_NONE);
-
-       return netfs_readpage(file, folio, &ceph_netfs_read_ops, NULL);
-}
-
-static void ceph_readahead(struct readahead_control *ractl)
-{
-       struct inode *inode = file_inode(ractl->file);
-       struct ceph_file_info *fi = ractl->file->private_data;
-       struct ceph_rw_context *rw_ctx;
-       int got = 0;
-       int ret = 0;
-
-       if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
-               return;
-
-       rw_ctx = ceph_find_rw_context(fi);
-       if (!rw_ctx) {
-               /*
-                * readahead callers do not necessarily hold Fcb caps
-                * (e.g. fadvise, madvise).
-                */
-               int want = CEPH_CAP_FILE_CACHE;
-
-               ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
-               if (ret < 0)
-                       dout("start_read %p, error getting cap\n", inode);
-               else if (!(got & want))
-                       dout("start_read %p, no cache cap\n", inode);
-
-               if (ret <= 0)
-                       return;
-       }
-       netfs_readahead(ractl, &ceph_netfs_read_ops, (void *)(uintptr_t)got);
-}
-
 #ifdef CONFIG_CEPH_FSCACHE
 static void ceph_set_page_fscache(struct page *page)
 {
@@ -1327,8 +1318,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
        struct folio *folio = NULL;
        int r;
 
-       r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL,
-                             &ceph_netfs_read_ops, NULL);
+       r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL);
        if (r == 0)
                folio_wait_fscache(folio);
        if (r < 0) {
@@ -1382,8 +1372,8 @@ out:
 }
 
 const struct address_space_operations ceph_aops = {
-       .readpage = ceph_readpage,
-       .readahead = ceph_readahead,
+       .readpage = netfs_readpage,
+       .readahead = netfs_readahead,
        .writepage = ceph_writepage,
        .writepages = ceph_writepages_start,
        .write_begin = ceph_write_begin,
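
With the ops table registered once per inode, ceph's address_space operations can point straight at netfslib, and the per-request state that ceph_readahead() used to thread through (the Fcb capability references) now travels via ->init_request() and rreq->netfs_priv. Schematically, the call pattern changes like this:

	/* 5.17 style: ops and private data supplied at every entry point */
	netfs_readahead(ractl, &ceph_netfs_read_ops, (void *)(uintptr_t)got);

	/* 5.18 style: register the ops once, at inode initialisation ... */
	netfs_i_context_init(&ci->vfs_inode, &ceph_netfs_ops);

	/* ... point the aops directly at netfslib ... */
	.readahead = netfs_readahead,

	/* ... and stash per-request state in ceph_init_request() instead */
	rreq->netfs_priv = (void *)(uintptr_t)got;

Note that ceph_init_request() takes the caps only for NETFS_READAHEAD origins; other request types return early and behave as before.
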
index 7d22850623efd0041b9e16e682ba7f479d581c7a..ddea999220739aaf49770c1cb1e7bc77ee2358fa 100644 (file)
@@ -29,26 +29,25 @@ void ceph_fscache_register_inode_cookie(struct inode *inode)
        if (!(inode->i_state & I_NEW))
                return;
 
-       WARN_ON_ONCE(ci->fscache);
+       WARN_ON_ONCE(ci->netfs_ctx.cache);
 
-       ci->fscache = fscache_acquire_cookie(fsc->fscache, 0,
-                                            &ci->i_vino, sizeof(ci->i_vino),
-                                            &ci->i_version, sizeof(ci->i_version),
-                                            i_size_read(inode));
+       ci->netfs_ctx.cache =
+               fscache_acquire_cookie(fsc->fscache, 0,
+                                      &ci->i_vino, sizeof(ci->i_vino),
+                                      &ci->i_version, sizeof(ci->i_version),
+                                      i_size_read(inode));
 }
 
-void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
+void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info *ci)
 {
-       struct fscache_cookie *cookie = ci->fscache;
-
-       fscache_relinquish_cookie(cookie, false);
+       fscache_relinquish_cookie(ceph_fscache_cookie(ci), false);
 }
 
 void ceph_fscache_use_cookie(struct inode *inode, bool will_modify)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
-       fscache_use_cookie(ci->fscache, will_modify);
+       fscache_use_cookie(ceph_fscache_cookie(ci), will_modify);
 }
 
 void ceph_fscache_unuse_cookie(struct inode *inode, bool update)
@@ -58,9 +57,10 @@ void ceph_fscache_unuse_cookie(struct inode *inode, bool update)
        if (update) {
                loff_t i_size = i_size_read(inode);
 
-               fscache_unuse_cookie(ci->fscache, &ci->i_version, &i_size);
+               fscache_unuse_cookie(ceph_fscache_cookie(ci),
+                                    &ci->i_version, &i_size);
        } else {
-               fscache_unuse_cookie(ci->fscache, NULL, NULL);
+               fscache_unuse_cookie(ceph_fscache_cookie(ci), NULL, NULL);
        }
 }
 
@@ -69,14 +69,14 @@ void ceph_fscache_update(struct inode *inode)
        struct ceph_inode_info *ci = ceph_inode(inode);
        loff_t i_size = i_size_read(inode);
 
-       fscache_update_cookie(ci->fscache, &ci->i_version, &i_size);
+       fscache_update_cookie(ceph_fscache_cookie(ci), &ci->i_version, &i_size);
 }
 
 void ceph_fscache_invalidate(struct inode *inode, bool dio_write)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
-       fscache_invalidate(ceph_inode(inode)->fscache,
+       fscache_invalidate(ceph_fscache_cookie(ci),
                           &ci->i_version, i_size_read(inode),
                           dio_write ? FSCACHE_INVAL_DIO_WRITE : 0);
 }
index b90f3016994d1d4012d73ee05f818646efede6bc..7255b790a4c1c579827223d4b5493f204a24e053 100644 (file)
@@ -26,14 +26,9 @@ void ceph_fscache_unuse_cookie(struct inode *inode, bool update);
 void ceph_fscache_update(struct inode *inode);
 void ceph_fscache_invalidate(struct inode *inode, bool dio_write);
 
-static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
-{
-       ci->fscache = NULL;
-}
-
 static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info *ci)
 {
-       return ci->fscache;
+       return netfs_i_cookie(&ci->vfs_inode);
 }
 
 static inline void ceph_fscache_resize(struct inode *inode, loff_t to)
@@ -62,7 +57,7 @@ static inline int ceph_fscache_dirty_folio(struct address_space *mapping,
        return fscache_dirty_folio(mapping, folio, ceph_fscache_cookie(ci));
 }
 
-static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq)
+static inline int ceph_begin_cache_operation(struct netfs_io_request *rreq)
 {
        struct fscache_cookie *cookie = ceph_fscache_cookie(ceph_inode(rreq->inode));
 
@@ -91,10 +86,6 @@ static inline void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
 {
 }
 
-static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
-{
-}
-
 static inline void ceph_fscache_register_inode_cookie(struct inode *inode)
 {
 }
@@ -144,7 +135,7 @@ static inline bool ceph_is_cache_enabled(struct inode *inode)
        return false;
 }
 
-static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq)
+static inline int ceph_begin_cache_operation(struct netfs_io_request *rreq)
 {
        return -ENOBUFS;
 }
index feb75eb1cd82651f17e67892f359ad307fd08d16..6c9e837aa1d3d183d507d72c21f5a44aa1e54522 100644 (file)
@@ -1869,7 +1869,7 @@ retry_snap:
                 * are pending vmtruncate. So write and vmtruncate
                 * can not run at the same time
                 */
-               written = generic_perform_write(file, from, pos);
+               written = generic_perform_write(iocb, from);
                if (likely(written >= 0))
                        iocb->ki_pos = pos + written;
                ceph_end_io_write(inode);
index d80911dc91c2010ddd7da20467ffc1490132957f..63113e2a4890763e255048c31a0b228cdba13a22 100644 (file)
@@ -459,6 +459,9 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
 
        dout("alloc_inode %p\n", &ci->vfs_inode);
 
+       /* Set parameters for the netfs library */
+       netfs_i_context_init(&ci->vfs_inode, &ceph_netfs_ops);
+
        spin_lock_init(&ci->i_ceph_lock);
 
        ci->i_version = 0;
@@ -544,9 +547,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
        INIT_WORK(&ci->i_work, ceph_inode_work);
        ci->i_work_mask = 0;
        memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
-
-       ceph_fscache_inode_init(ci);
-
        return &ci->vfs_inode;
 }
 
index a1ecc410a4952f99bdf7e171626543e05e4203ea..20ceab74e87178ef03acccad052d812fa7d147ce 100644 (file)
 #include <linux/posix_acl.h>
 #include <linux/refcount.h>
 #include <linux/security.h>
+#include <linux/netfs.h>
+#include <linux/fscache.h>
 
 #include <linux/ceph/libceph.h>
 
-#ifdef CONFIG_CEPH_FSCACHE
-#include <linux/fscache.h>
-#endif
-
 /* large granularity for statfs utilization stats to facilitate
  * large volume sizes on 32-bit machines. */
 #define CEPH_BLOCK_SHIFT   22  /* 4 MB */
@@ -318,6 +316,11 @@ struct ceph_inode_xattrs_info {
  * Ceph inode.
  */
 struct ceph_inode_info {
+       struct {
+               /* These must be contiguous */
+               struct inode vfs_inode;
+               struct netfs_i_context netfs_ctx; /* Netfslib context */
+       };
        struct ceph_vino i_vino;   /* ceph ino + snap */
 
        spinlock_t i_ceph_lock;
@@ -428,11 +431,6 @@ struct ceph_inode_info {
 
        struct work_struct i_work;
        unsigned long  i_work_mask;
-
-#ifdef CONFIG_CEPH_FSCACHE
-       struct fscache_cookie *fscache;
-#endif
-       struct inode vfs_inode; /* at end */
 };
 
 static inline struct ceph_inode_info *
@@ -1216,6 +1214,7 @@ extern void __ceph_touch_fmode(struct ceph_inode_info *ci,
 
 /* addr.c */
 extern const struct address_space_operations ceph_aops;
+extern const struct netfs_request_ops ceph_netfs_ops;
 extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
 extern int ceph_uninline_data(struct file *file);
 extern int ceph_pool_perm_check(struct inode *inode, int need);
index ea00e1a91250c9fac19992a6eef74802a6165019..9d334816eac074a5edc2233d44a2008e1956a376 100644 (file)
@@ -94,7 +94,7 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
                   le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics),
                   le32_to_cpu(tcon->fsAttrInfo.Attributes),
                   le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength),
-                  tcon->tidStatus);
+                  tcon->status);
        if (dev_type == FILE_DEVICE_DISK)
                seq_puts(m, " type: DISK ");
        else if (dev_type == FILE_DEVICE_CD_ROM)
index d1211ad4e85b343e45e43bf6e01581ca7bf82fee..2b1a1c029c75ec19376a7250fb806b516e59ad03 100644 (file)
@@ -266,22 +266,24 @@ static void cifs_kill_sb(struct super_block *sb)
         * before we kill the sb.
         */
        if (cifs_sb->root) {
+               for (node = rb_first(root); node; node = rb_next(node)) {
+                       tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+                       tcon = tlink_tcon(tlink);
+                       if (IS_ERR(tcon))
+                               continue;
+                       cfid = &tcon->crfid;
+                       mutex_lock(&cfid->fid_mutex);
+                       if (cfid->dentry) {
+                               dput(cfid->dentry);
+                               cfid->dentry = NULL;
+                       }
+                       mutex_unlock(&cfid->fid_mutex);
+               }
+
+               /* finally release root dentry */
                dput(cifs_sb->root);
                cifs_sb->root = NULL;
        }
-       node = rb_first(root);
-       while (node != NULL) {
-               tlink = rb_entry(node, struct tcon_link, tl_rbnode);
-               tcon = tlink_tcon(tlink);
-               cfid = &tcon->crfid;
-               mutex_lock(&cfid->fid_mutex);
-               if (cfid->dentry) {
-                       dput(cfid->dentry);
-                       cfid->dentry = NULL;
-               }
-               mutex_unlock(&cfid->fid_mutex);
-               node = rb_next(node);
-       }
 
        kill_anon_super(sb);
        cifs_umount(cifs_sb);
@@ -699,14 +701,14 @@ static void cifs_umount_begin(struct super_block *sb)
        tcon = cifs_sb_master_tcon(cifs_sb);
 
        spin_lock(&cifs_tcp_ses_lock);
-       if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
+       if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
                /* we have other mounts to same share or we have
                   already tried to force umount this and woken up
                   all waiting network requests, nothing to do */
                spin_unlock(&cifs_tcp_ses_lock);
                return;
        } else if (tcon->tc_count == 1)
-               tcon->tidStatus = CifsExiting;
+               tcon->status = TID_EXITING;
        spin_unlock(&cifs_tcp_ses_lock);
 
        /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
@@ -944,7 +946,7 @@ cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
        ssize_t rc;
        struct inode *inode = file_inode(iocb->ki_filp);
 
-       if (iocb->ki_filp->f_flags & O_DIRECT)
+       if (iocb->ki_flags & IOCB_DIRECT)
                return cifs_user_readv(iocb, iter);
 
        rc = cifs_revalidate_mapping(inode);
index 15a5c5db038b8230cd36afab81274096f1e21db2..c0542bdcd06bcc80dd61c2034ec36b0d64ea2266 100644 (file)
@@ -153,5 +153,5 @@ extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
 #define SMB3_PRODUCT_BUILD 35
-#define CIFS_VERSION   "2.35"
+#define CIFS_VERSION   "2.36"
 #endif                         /* _CIFSFS_H */
index 48b343d0343091fa18259b8b8bf568268262b848..8de977c359b1132462c533996187d1cb1b00a873 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
 #include <linux/utsname.h>
+#include <linux/netfs.h>
 #include "cifs_fs_sb.h"
 #include "cifsacl.h"
 #include <crypto/internal/hash.h>
@@ -115,10 +116,18 @@ enum statusEnum {
        CifsInNegotiate,
        CifsNeedSessSetup,
        CifsInSessSetup,
-       CifsNeedTcon,
-       CifsInTcon,
-       CifsNeedFilesInvalidate,
-       CifsInFilesInvalidate
+};
+
+/* associated with each tree connection to the server */
+enum tid_status_enum {
+       TID_NEW = 0,
+       TID_GOOD,
+       TID_EXITING,
+       TID_NEED_RECON,
+       TID_NEED_TCON,
+       TID_IN_TCON,
+       TID_NEED_FILES_INVALIDATE, /* currently unused */
+       TID_IN_FILES_INVALIDATE
 };
 
 enum securityEnum {
@@ -852,13 +861,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
 #define CIFS_MAX_RFC1002_WSIZE ((1<<17) - 1 - sizeof(WRITE_REQ) + 4)
 #define CIFS_MAX_RFC1002_RSIZE ((1<<17) - 1 - sizeof(READ_RSP) + 4)
 
-/*
- * The default wsize is 1M. find_get_pages seems to return a maximum of 256
- * pages in a single call. With PAGE_SIZE == 4k, this means we can fill
- * a single wsize request with a single call.
- */
 #define CIFS_DEFAULT_IOSIZE (1024 * 1024)
-#define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024)
 
 /*
  * Windows only supports a max of 60kb reads and 65535 byte writes. Default to
@@ -1038,7 +1041,7 @@ struct cifs_tcon {
        char *password;         /* for share-level security */
        __u32 tid;              /* The 4 byte tree id */
        __u16 Flags;            /* optional support bits */
-       enum statusEnum tidStatus;
+       enum tid_status_enum status;
        atomic_t num_smbs_sent;
        union {
                struct {
@@ -1402,6 +1405,11 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
  */
 
 struct cifsInodeInfo {
+       struct {
+               /* These must be contiguous */
+               struct inode    vfs_inode;      /* the VFS's inode record */
+               struct netfs_i_context netfs_ctx; /* Netfslib context */
+       };
        bool can_cache_brlcks;
        struct list_head llist; /* locks held by this inode */
        /*
@@ -1432,10 +1440,6 @@ struct cifsInodeInfo {
        u64  uniqueid;                  /* server inode number */
        u64  createtime;                /* creation time on server */
        __u8 lease_key[SMB2_LEASE_KEY_SIZE];    /* lease key for this inode */
-#ifdef CONFIG_CIFS_FSCACHE
-       struct fscache_cookie *fscache;
-#endif
-       struct inode vfs_inode;
        struct list_head deferred_closes; /* list of deferred closes */
        spinlock_t deferred_lock; /* protection on deferred list */
        bool lease_granted; /* Flag to indicate whether lease or oplock is granted. */
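
The "These must be contiguous" comment above is load-bearing: netfslib of this vintage locates its per-inode context by pointer arithmetic from the VFS inode rather than via a dedicated field. A minimal sketch of the helpers this layout serves, assuming the include/linux/netfs.h definitions of this era (illustrative, not part of the patch):

	/* netfslib assumes the context sits immediately after struct inode,
	 * which is why cifsInodeInfo keeps vfs_inode and netfs_ctx adjacent. */
	static inline struct netfs_i_context *netfs_i_context(struct inode *inode)
	{
		return (struct netfs_i_context *)(inode + 1);
	}

	static inline struct fscache_cookie *netfs_i_cookie(struct inode *inode)
	{
		return netfs_i_context(inode)->cache;	/* the fscache cookie */
	}
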
index 68b9a436af4bf322c5958ac2283a8e59e160ee7a..aeba371c4c707b5164ce1fdbf7657eee3c8444e9 100644 (file)
  */
 #define CIFS_SESS_KEY_SIZE (16)
 
-/*
- * Size of the smb3 signing key
- */
-#define SMB3_SIGN_KEY_SIZE (16)
-
-/*
- * Size of the smb3 encryption/decryption key storage.
- * This size is big enough to store any cipher key types.
- */
-#define SMB3_ENC_DEC_KEY_SIZE (32)
-
-#define CIFS_CLIENT_CHALLENGE_SIZE (8)
 #define CIFS_SERVER_CHALLENGE_SIZE (8)
 #define CIFS_HMAC_MD5_HASH_SIZE (16)
 #define CIFS_CPHTXT_SIZE (16)
@@ -1658,7 +1646,7 @@ struct smb_t2_rsp {
 #define SMB_FIND_FILE_ID_FULL_DIR_INFO    0x105
 #define SMB_FIND_FILE_ID_BOTH_DIR_INFO    0x106
 #define SMB_FIND_FILE_UNIX                0x202
-#define SMB_FIND_FILE_POSIX_INFO          0x064
+/* #define SMB_FIND_FILE_POSIX_INFO          0x064 */
 
 typedef struct smb_com_transaction2_qpi_req {
        struct smb_hdr hdr;     /* wct = 14+ */
index 071e2f21a7db7e4713f79278a8ea2c7d7418ee9a..47e927c4ff8de3fb2b9e87daf48873b80117a8da 100644 (file)
@@ -75,12 +75,11 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
 
        /* only send once per connect */
        spin_lock(&cifs_tcp_ses_lock);
-       if (tcon->ses->status != CifsGood ||
-           tcon->tidStatus != CifsNeedReconnect) {
+       if ((tcon->ses->status != CifsGood) || (tcon->status != TID_NEED_RECON)) {
                spin_unlock(&cifs_tcp_ses_lock);
                return;
        }
-       tcon->tidStatus = CifsInFilesInvalidate;
+       tcon->status = TID_IN_FILES_INVALIDATE;
        spin_unlock(&cifs_tcp_ses_lock);
 
        /* list all files open on tree connection and mark them invalid */
@@ -100,8 +99,8 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
        mutex_unlock(&tcon->crfid.fid_mutex);
 
        spin_lock(&cifs_tcp_ses_lock);
-       if (tcon->tidStatus == CifsInFilesInvalidate)
-               tcon->tidStatus = CifsNeedTcon;
+       if (tcon->status == TID_IN_FILES_INVALIDATE)
+               tcon->status = TID_NEED_TCON;
        spin_unlock(&cifs_tcp_ses_lock);
 
        /*
@@ -136,7 +135,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
         * have tcon) are allowed as we start force umount
         */
        spin_lock(&cifs_tcp_ses_lock);
-       if (tcon->tidStatus == CifsExiting) {
+       if (tcon->status == TID_EXITING) {
                if (smb_command != SMB_COM_WRITE_ANDX &&
                    smb_command != SMB_COM_OPEN_ANDX &&
                    smb_command != SMB_COM_TREE_DISCONNECT) {
@@ -597,7 +596,7 @@ CIFSSMBNegotiate(const unsigned int xid,
        set_credits(server, server->maxReq);
        /* probably no need to store and check maxvcs */
        server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
-       /* set up max_read for readpages check */
+       /* set up max_read for readahead check */
        server->max_read = server->maxBuf;
        server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
        cifs_dbg(NOISY, "Max buf = %d\n", ses->server->maxBuf);
index 9964c363432254573df2020e84e7fc5ff27fbc6e..902e8c6c0f9c27890690515d3886fcb85a555026 100644 (file)
@@ -245,7 +245,7 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
 
                list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
                        tcon->need_reconnect = true;
-                       tcon->tidStatus = CifsNeedReconnect;
+                       tcon->status = TID_NEED_RECON;
                }
                if (ses->tcon_ipc)
                        ses->tcon_ipc->need_reconnect = true;
@@ -453,9 +453,7 @@ static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_
        return rc;
 }
 
-static int
-reconnect_dfs_server(struct TCP_Server_Info *server,
-                    bool mark_smb_session)
+static int reconnect_dfs_server(struct TCP_Server_Info *server)
 {
        int rc = 0;
        const char *refpath = server->current_fullpath + 1;
@@ -479,7 +477,12 @@ reconnect_dfs_server(struct TCP_Server_Info *server,
        if (!cifs_tcp_ses_needs_reconnect(server, num_targets))
                return 0;
 
-       cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
+       /*
+        * Unconditionally mark all sessions & tcons for reconnect as we might be connecting to a
+        * different server or share during failover.  It could be improved by adding some logic to
+        * only do that when connecting to a different server or share, though.
+        */
+       cifs_mark_tcp_ses_conns_for_reconnect(server, true);
 
        cifs_abort_connection(server);
 
@@ -537,7 +540,7 @@ int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
        }
        spin_unlock(&cifs_tcp_ses_lock);
 
-       return reconnect_dfs_server(server, mark_smb_session);
+       return reconnect_dfs_server(server);
 }
 #else
 int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
@@ -1046,7 +1049,7 @@ smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
                spin_unlock(&server->req_lock);
                wake_up(&server->request_q);
 
-               trace_smb3_add_credits(server->CurrentMid,
+               trace_smb3_hdr_credits(server->CurrentMid,
                                server->conn_id, server->hostname, scredits,
                                le16_to_cpu(shdr->CreditRequest), in_flight);
                cifs_server_dbg(FYI, "%s: added %u credits total=%d\n",
@@ -2207,7 +2210,7 @@ get_ses_fail:
 
 static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
 {
-       if (tcon->tidStatus == CifsExiting)
+       if (tcon->status == TID_EXITING)
                return 0;
        if (strncmp(tcon->treeName, ctx->UNC, MAX_TREE_SIZE))
                return 0;
@@ -3513,6 +3516,9 @@ static int connect_dfs_target(struct mount_ctx *mnt_ctx, const char *full_path,
        struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
        char *oldmnt = cifs_sb->ctx->mount_options;
 
+       cifs_dbg(FYI, "%s: full_path=%s ref_path=%s target=%s\n", __func__, full_path, ref_path,
+                dfs_cache_get_tgt_name(tit));
+
        rc = dfs_cache_get_tgt_referral(ref_path, tit, &ref);
        if (rc)
                goto out;
@@ -3611,13 +3617,18 @@ static int __follow_dfs_link(struct mount_ctx *mnt_ctx)
        if (rc)
                goto out;
 
-       /* Try all dfs link targets */
+       /* Try all dfs link targets.  If an I/O fails from the currently connected DFS target with an
+        * error other than STATUS_PATH_NOT_COVERED (-EREMOTE), then retry it from other targets as
+        * specified in MS-DFSC "3.1.5.2 I/O Operation to Target Fails with an Error Other Than
+        * STATUS_PATH_NOT_COVERED."
+        */
        for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(&tl);
             tit; tit = dfs_cache_get_next_tgt(&tl, tit)) {
                rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->leaf_fullpath + 1, tit);
                if (!rc) {
                        rc = is_path_remote(mnt_ctx);
-                       break;
+                       if (!rc || rc == -EREMOTE)
+                               break;
                }
        }
 
@@ -3691,7 +3702,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
                goto error;
 
        rc = is_path_remote(&mnt_ctx);
-       if (rc == -EREMOTE)
+       if (rc)
                rc = follow_dfs_link(&mnt_ctx);
        if (rc)
                goto error;
@@ -4457,7 +4468,7 @@ static int tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tco
         */
        if (rc && server->current_fullpath != server->origin_fullpath) {
                server->current_fullpath = server->origin_fullpath;
-               cifs_reconnect(tcon->ses->server, true);
+               cifs_signal_cifsd_for_reconnect(server, true);
        }
 
        dfs_cache_free_tgts(tl);
@@ -4478,12 +4489,12 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
        /* only send once per connect */
        spin_lock(&cifs_tcp_ses_lock);
        if (tcon->ses->status != CifsGood ||
-           (tcon->tidStatus != CifsNew &&
-           tcon->tidStatus != CifsNeedTcon)) {
+           (tcon->status != TID_NEW &&
+           tcon->status != TID_NEED_TCON)) {
                spin_unlock(&cifs_tcp_ses_lock);
                return 0;
        }
-       tcon->tidStatus = CifsInTcon;
+       tcon->status = TID_IN_TCON;
        spin_unlock(&cifs_tcp_ses_lock);
 
        tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
@@ -4524,13 +4535,13 @@ out:
 
        if (rc) {
                spin_lock(&cifs_tcp_ses_lock);
-               if (tcon->tidStatus == CifsInTcon)
-                       tcon->tidStatus = CifsNeedTcon;
+               if (tcon->status == TID_IN_TCON)
+                       tcon->status = TID_NEED_TCON;
                spin_unlock(&cifs_tcp_ses_lock);
        } else {
                spin_lock(&cifs_tcp_ses_lock);
-               if (tcon->tidStatus == CifsInTcon)
-                       tcon->tidStatus = CifsGood;
+               if (tcon->status == TID_IN_TCON)
+                       tcon->status = TID_GOOD;
                spin_unlock(&cifs_tcp_ses_lock);
                tcon->need_reconnect = false;
        }
@@ -4546,24 +4557,24 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
        /* only send once per connect */
        spin_lock(&cifs_tcp_ses_lock);
        if (tcon->ses->status != CifsGood ||
-           (tcon->tidStatus != CifsNew &&
-           tcon->tidStatus != CifsNeedTcon)) {
+           (tcon->status != TID_NEW &&
+           tcon->status != TID_NEED_TCON)) {
                spin_unlock(&cifs_tcp_ses_lock);
                return 0;
        }
-       tcon->tidStatus = CifsInTcon;
+       tcon->status = TID_IN_TCON;
        spin_unlock(&cifs_tcp_ses_lock);
 
        rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc);
        if (rc) {
                spin_lock(&cifs_tcp_ses_lock);
-               if (tcon->tidStatus == CifsInTcon)
-                       tcon->tidStatus = CifsNeedTcon;
+               if (tcon->status == TID_IN_TCON)
+                       tcon->status = TID_NEED_TCON;
                spin_unlock(&cifs_tcp_ses_lock);
        } else {
                spin_lock(&cifs_tcp_ses_lock);
-               if (tcon->tidStatus == CifsInTcon)
-                       tcon->tidStatus = CifsGood;
+               if (tcon->status == TID_IN_TCON)
+                       tcon->status = TID_GOOD;
                spin_unlock(&cifs_tcp_ses_lock);
                tcon->need_reconnect = false;
        }
index 60f43bff7ccb3bfa9288dbd727dbb7d8fa8d64f4..d511a78383c38ec7ce681fc74d2c4800f7b908ad 100644 (file)
@@ -4210,13 +4210,19 @@ cifs_page_mkwrite(struct vm_fault *vmf)
 {
        struct page *page = vmf->page;
 
+       /* Wait for the page to be written to the cache before we allow it to
+        * be modified.  We then assume the entire page will need writing back.
+        */
 #ifdef CONFIG_CIFS_FSCACHE
        if (PageFsCache(page) &&
            wait_on_page_fscache_killable(page) < 0)
                return VM_FAULT_RETRY;
 #endif
 
-       lock_page(page);
+       wait_on_page_writeback(page);
+
+       if (lock_page_killable(page) < 0)
+               return VM_FAULT_RETRY;
        return VM_FAULT_LOCKED;
 }
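
The switch from lock_page() to lock_page_killable() means a fatal signal can now abort the wait instead of blocking uninterruptibly, with VM_FAULT_RETRY letting the fault core back out. Condensed to its contract, the handler behaves as in this sketch (an illustrative restatement of the function above, under a hypothetical name):

	static vm_fault_t sketch_page_mkwrite(struct vm_fault *vmf)
	{
		struct page *page = vmf->page;

		/* don't let userspace scribble on a page under write-out */
		wait_on_page_writeback(page);

		/* killable lock: a fatal signal aborts the fault instead of
		 * sleeping forever; the core then retries or tears down */
		if (lock_page_killable(page) < 0)
			return VM_FAULT_RETRY;

		return VM_FAULT_LOCKED;	/* page handed back locked */
	}
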
 
index 33af72e0ac0c52d7d3f9f8d23275f9bba2574078..a638b29e906201a3128eb91e3170a673ca649df8 100644 (file)
@@ -103,7 +103,7 @@ void cifs_fscache_get_inode_cookie(struct inode *inode)
 
        cifs_fscache_fill_coherency(&cifsi->vfs_inode, &cd);
 
-       cifsi->fscache =
+       cifsi->netfs_ctx.cache =
                fscache_acquire_cookie(tcon->fscache, 0,
                                       &cifsi->uniqueid, sizeof(cifsi->uniqueid),
                                       &cd, sizeof(cd),
@@ -126,22 +126,15 @@ void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update)
 void cifs_fscache_release_inode_cookie(struct inode *inode)
 {
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
+       struct fscache_cookie *cookie = cifs_inode_cookie(inode);
 
-       if (cifsi->fscache) {
-               cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cifsi->fscache);
-               fscache_relinquish_cookie(cifsi->fscache, false);
-               cifsi->fscache = NULL;
+       if (cookie) {
+               cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cookie);
+               fscache_relinquish_cookie(cookie, false);
+               cifsi->netfs_ctx.cache = NULL;
        }
 }
 
-static inline void fscache_end_operation(struct netfs_cache_resources *cres)
-{
-       const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
-
-       if (ops)
-               ops->end_operation(cres);
-}
-
 /*
  * Fallback page reading interface.
  */
index 55129908e2c131e2080a44978d9eb45e734edf2d..52355c0912aee44bb55d9a259ee3f233b07da8a8 100644 (file)
@@ -61,7 +61,7 @@ void cifs_fscache_fill_coherency(struct inode *inode,
 
 static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode)
 {
-       return CIFS_I(inode)->fscache;
+       return netfs_i_cookie(inode);
 }
 
 static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags)
index 60d853c92f6aaedf6248db56f79350f7eb6947f4..2f9e7d2f81b6f5bab7866b8ba0fb73d6f1ebfd51 100644 (file)
@@ -49,7 +49,7 @@ static void cifs_set_ops(struct inode *inode)
                        inode->i_fop = &cifs_file_ops;
                }
 
-               /* check if server can support readpages */
+               /* check if server can support readahead */
                if (cifs_sb_master_tcon(cifs_sb)->ses->server->max_read <
                                PAGE_SIZE + MAX_CIFS_HDR_SIZE)
                        inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
index 852e54ee82c28279c00e6013b19916f3815a8b70..bbdf3281559c8f53c0e2c9d281f8fd5b4921e11b 100644 (file)
@@ -85,6 +85,9 @@ parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len,
        if (rc != 1)
                return -EINVAL;
 
+       if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
+               return -EINVAL;
+
        rc = symlink_hash(link_len, link_str, md5_hash);
        if (rc) {
                cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc);
index 56598f7dbe00d8ba3001c52f31141096c85d226b..afaf59c221936f3023c518be2ac6f2d95a9e1e28 100644 (file)
@@ -116,7 +116,7 @@ tconInfoAlloc(void)
        }
 
        atomic_inc(&tconInfoAllocCount);
-       ret_buf->tidStatus = CifsNew;
+       ret_buf->status = TID_NEW;
        ++ret_buf->tc_count;
        INIT_LIST_HEAD(&ret_buf->openFileList);
        INIT_LIST_HEAD(&ret_buf->tcon_list);
index ebe236b9d9f56e35c1d1b65ad9038421f6b6d894..235aa1b395ebcce0b9ba1bfb42a62909089f3182 100644 (file)
@@ -896,7 +896,7 @@ map_and_check_smb_error(struct mid_q_entry *mid, bool logErr)
                if (class == ERRSRV && code == ERRbaduid) {
                        cifs_dbg(FYI, "Server returned 0x%x, reconnecting session...\n",
                                code);
-                       cifs_reconnect(mid->server, false);
+                       cifs_signal_cifsd_for_reconnect(mid->server, false);
                }
        }
 
index 4125fd113cfbaedd36ba86ae6d2f7da486123402..82e916ad167c00b9fac7c5ea36756302b867287e 100644 (file)
 #define END_OF_CHAIN 4
 #define RELATED_REQUEST 8
 
-#define SMB2_SIGNATURE_SIZE (16)
-#define SMB2_NTLMV2_SESSKEY_SIZE (16)
-#define SMB2_HMACSHA256_SIZE (32)
-#define SMB2_CMACAES_SIZE (16)
-#define SMB3_SIGNKEY_SIZE (16)
-#define SMB3_GCM128_CRYPTKEY_SIZE (16)
-#define SMB3_GCM256_CRYPTKEY_SIZE (32)
-
-/* Maximum buffer size value we can send with 1 credit */
-#define SMB2_MAX_BUFFER_SIZE 65536
-
 #endif /* _SMB2_GLOB_H */
index b25623e3fe3d5596e10202c5d8abcc2fd96f76f2..3fe47a88f47d0d0a373d4cb8ce0c13bb57b6b9d2 100644 (file)
@@ -150,16 +150,18 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
                struct smb2_transform_hdr *thdr =
                        (struct smb2_transform_hdr *)buf;
                struct cifs_ses *ses = NULL;
+               struct cifs_ses *iter;
 
                /* decrypt frame now that it is completely read in */
                spin_lock(&cifs_tcp_ses_lock);
-               list_for_each_entry(ses, &srvr->smb_ses_list, smb_ses_list) {
-                       if (ses->Suid == le64_to_cpu(thdr->SessionId))
+               list_for_each_entry(iter, &srvr->smb_ses_list, smb_ses_list) {
+                       if (iter->Suid == le64_to_cpu(thdr->SessionId)) {
+                               ses = iter;
                                break;
+                       }
                }
                spin_unlock(&cifs_tcp_ses_lock);
-               if (list_entry_is_head(ses, &srvr->smb_ses_list,
-                                      smb_ses_list)) {
+               if (!ses) {
                        cifs_dbg(VFS, "no decryption - session id not found\n");
                        return 1;
                }
@@ -203,7 +205,7 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
 
        if (smb2_rsp_struct_sizes[command] != pdu->StructureSize2) {
                if (command != SMB2_OPLOCK_BREAK_HE && (shdr->Status == 0 ||
-                   pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2)) {
+                   pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2_LE)) {
                        /* error packets have 9 byte structure size */
                        cifs_dbg(VFS, "Invalid response size %u for command %d\n",
                                 le16_to_cpu(pdu->StructureSize2), command);
@@ -303,7 +305,7 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr)
        /* error responses do not have data area */
        if (shdr->Status && shdr->Status != STATUS_MORE_PROCESSING_REQUIRED &&
            (((struct smb2_err_rsp *)shdr)->StructureSize) ==
-                                               SMB2_ERROR_STRUCTURE_SIZE2)
+                                               SMB2_ERROR_STRUCTURE_SIZE2_LE)
                return NULL;
 
        /*
@@ -478,11 +480,11 @@ smb2_get_lease_state(struct cifsInodeInfo *cinode)
        __le32 lease = 0;
 
        if (CIFS_CACHE_WRITE(cinode))
-               lease |= SMB2_LEASE_WRITE_CACHING;
+               lease |= SMB2_LEASE_WRITE_CACHING_LE;
        if (CIFS_CACHE_HANDLE(cinode))
-               lease |= SMB2_LEASE_HANDLE_CACHING;
+               lease |= SMB2_LEASE_HANDLE_CACHING_LE;
        if (CIFS_CACHE_READ(cinode))
-               lease |= SMB2_LEASE_READ_CACHING;
+               lease |= SMB2_LEASE_READ_CACHING_LE;
        return lease;
 }
 
@@ -832,8 +834,8 @@ smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *serve
        rc = __smb2_handle_cancelled_cmd(tcon,
                                         le16_to_cpu(hdr->Command),
                                         le64_to_cpu(hdr->MessageId),
-                                        le64_to_cpu(rsp->PersistentFileId),
-                                        le64_to_cpu(rsp->VolatileFileId));
+                                        rsp->PersistentFileId,
+                                        rsp->VolatileFileId);
        if (rc)
                cifs_put_tcon(tcon);
 
index 891b11576e557b090503fcce4caff1f18de89333..a67df8eaf7026186b6e9efa0413731ff3ae4d167 100644 (file)
@@ -86,6 +86,9 @@ smb2_add_credits(struct TCP_Server_Info *server,
        if (*val > 65000) {
                *val = 65000; /* Don't get near 64K credits, avoid srv bugs */
                pr_warn_once("server overflowed SMB3 credits\n");
+               trace_smb3_overflow_credits(server->CurrentMid,
+                                           server->conn_id, server->hostname, *val,
+                                           add, server->in_flight);
        }
        server->in_flight--;
        if (server->in_flight == 0 &&
@@ -251,7 +254,7 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
        in_flight = server->in_flight;
        spin_unlock(&server->req_lock);
 
-       trace_smb3_add_credits(server->CurrentMid,
+       trace_smb3_wait_credits(server->CurrentMid,
                        server->conn_id, server->hostname, scredits, -(credits->value), in_flight);
        cifs_dbg(FYI, "%s: removed %u credits total=%d\n",
                        __func__, credits->value, scredits);
@@ -300,7 +303,7 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
        spin_unlock(&server->req_lock);
        wake_up(&server->request_q);
 
-       trace_smb3_add_credits(server->CurrentMid,
+       trace_smb3_adj_credits(server->CurrentMid,
                        server->conn_id, server->hostname, scredits,
                        credits->value - new_val, in_flight);
        cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n",
@@ -897,8 +900,8 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
        atomic_inc(&tcon->num_remote_opens);
 
        o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
-       oparms.fid->persistent_fid = le64_to_cpu(o_rsp->PersistentFileId);
-       oparms.fid->volatile_fid = le64_to_cpu(o_rsp->VolatileFileId);
+       oparms.fid->persistent_fid = o_rsp->PersistentFileId;
+       oparms.fid->volatile_fid = o_rsp->VolatileFileId;
 #ifdef CONFIG_CIFS_DEBUG2
        oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
 #endif /* CIFS_DEBUG2 */
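
This hunk is one instance of a pattern repeated through the series: the file IDs in the shared SMB2 response structures are now declared __le64 (an assumption about the shared header, which is not shown in this diff), so they are treated as opaque on-the-wire tokens and the le64_to_cpu()/cpu_to_le64() round-trips drop out. Roughly:

	/* Sketch, assuming the shared header's new declarations
	 * (hypothetical reduced struct for illustration): */
	struct smb2_create_rsp_ids {
		__le64 PersistentFileId;
		__le64 VolatileFileId;
	};

	/* local fid storage then takes the values verbatim, no byte swap: */
	fid->persistent_fid = rsp->PersistentFileId;
	fid->volatile_fid   = rsp->VolatileFileId;
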
@@ -1192,17 +1195,12 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
               struct cifs_sb_info *cifs_sb)
 {
        int rc;
-       __le16 *utf16_path;
        struct kvec rsp_iov = {NULL, 0};
        int buftype = CIFS_NO_BUFFER;
        struct smb2_query_info_rsp *rsp;
        struct smb2_file_full_ea_info *info = NULL;
 
-       utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
-       if (!utf16_path)
-               return -ENOMEM;
-
-       rc = smb2_query_info_compound(xid, tcon, utf16_path,
+       rc = smb2_query_info_compound(xid, tcon, path,
                                      FILE_READ_EA,
                                      FILE_FULL_EA_INFORMATION,
                                      SMB2_O_INFO_FILE,
@@ -1235,7 +1233,6 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
                        le32_to_cpu(rsp->OutputBufferLength), ea_name);
 
  qeas_exit:
-       kfree(utf16_path);
        free_rsp_buf(buftype, rsp_iov.iov_base);
        return rc;
 }
@@ -1295,7 +1292,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
                         * the new EA. If not we should not add it since we
                         * would not be able to even read the EAs back.
                         */
-                       rc = smb2_query_info_compound(xid, tcon, utf16_path,
+                       rc = smb2_query_info_compound(xid, tcon, path,
                                      FILE_READ_EA,
                                      FILE_FULL_EA_INFORMATION,
                                      SMB2_O_INFO_FILE,
@@ -1643,6 +1640,7 @@ smb2_ioctl_query_info(const unsigned int xid,
        unsigned int size[2];
        void *data[2];
        int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
+       void (*free_req1_func)(struct smb_rqst *r);
 
        vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
        if (vars == NULL)
@@ -1652,27 +1650,29 @@ smb2_ioctl_query_info(const unsigned int xid,
 
        resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
 
-       if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
-               goto e_fault;
-
+       if (copy_from_user(&qi, arg, sizeof(struct smb_query_info))) {
+               rc = -EFAULT;
+               goto free_vars;
+       }
        if (qi.output_buffer_length > 1024) {
-               kfree(vars);
-               return -EINVAL;
+               rc = -EINVAL;
+               goto free_vars;
        }
 
        if (!ses || !server) {
-               kfree(vars);
-               return -EIO;
+               rc = -EIO;
+               goto free_vars;
        }
 
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
-       buffer = memdup_user(arg + sizeof(struct smb_query_info),
-                            qi.output_buffer_length);
-       if (IS_ERR(buffer)) {
-               kfree(vars);
-               return PTR_ERR(buffer);
+       if (qi.output_buffer_length) {
+               buffer = memdup_user(arg + sizeof(struct smb_query_info), qi.output_buffer_length);
+               if (IS_ERR(buffer)) {
+                       rc = PTR_ERR(buffer);
+                       goto free_vars;
+               }
        }
 
        /* Open */
@@ -1710,45 +1710,45 @@ smb2_ioctl_query_info(const unsigned int xid,
        rc = SMB2_open_init(tcon, server,
                            &rqst[0], &oplock, &oparms, path);
        if (rc)
-               goto iqinf_exit;
+               goto free_output_buffer;
        smb2_set_next_command(tcon, &rqst[0]);
 
        /* Query */
        if (qi.flags & PASSTHRU_FSCTL) {
                /* Can eventually relax perm check since server enforces too */
-               if (!capable(CAP_SYS_ADMIN))
+               if (!capable(CAP_SYS_ADMIN)) {
                        rc = -EPERM;
-               else  {
-                       rqst[1].rq_iov = &vars->io_iov[0];
-                       rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
-
-                       rc = SMB2_ioctl_init(tcon, server,
-                                            &rqst[1],
-                                            COMPOUND_FID, COMPOUND_FID,
-                                            qi.info_type, true, buffer,
-                                            qi.output_buffer_length,
-                                            CIFSMaxBufSize -
-                                            MAX_SMB2_CREATE_RESPONSE_SIZE -
-                                            MAX_SMB2_CLOSE_RESPONSE_SIZE);
+                       goto free_open_req;
                }
+               rqst[1].rq_iov = &vars->io_iov[0];
+               rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
+
+               rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
+                                    qi.info_type, true, buffer, qi.output_buffer_length,
+                                    CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
+                                    MAX_SMB2_CLOSE_RESPONSE_SIZE);
+               free_req1_func = SMB2_ioctl_free;
        } else if (qi.flags == PASSTHRU_SET_INFO) {
                /* Can eventually relax perm check since server enforces too */
-               if (!capable(CAP_SYS_ADMIN))
+               if (!capable(CAP_SYS_ADMIN)) {
                        rc = -EPERM;
-               else  {
-                       rqst[1].rq_iov = &vars->si_iov[0];
-                       rqst[1].rq_nvec = 1;
-
-                       size[0] = 8;
-                       data[0] = buffer;
-
-                       rc = SMB2_set_info_init(tcon, server,
-                                       &rqst[1],
-                                       COMPOUND_FID, COMPOUND_FID,
-                                       current->tgid,
-                                       FILE_END_OF_FILE_INFORMATION,
-                                       SMB2_O_INFO_FILE, 0, data, size);
+                       goto free_open_req;
                }
+               if (qi.output_buffer_length < 8) {
+                       rc = -EINVAL;
+                       goto free_open_req;
+               }
+               rqst[1].rq_iov = &vars->si_iov[0];
+               rqst[1].rq_nvec = 1;
+
+               /* MS-FSCC 2.4.13 FileEndOfFileInformation */
+               size[0] = 8;
+               data[0] = buffer;
+
+               rc = SMB2_set_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
+                                       current->tgid, FILE_END_OF_FILE_INFORMATION,
+                                       SMB2_O_INFO_FILE, 0, data, size);
+               free_req1_func = SMB2_set_info_free;
        } else if (qi.flags == PASSTHRU_QUERY_INFO) {
                rqst[1].rq_iov = &vars->qi_iov[0];
                rqst[1].rq_nvec = 1;
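
The 8-byte minimum added above guards the SMB2_set_info_init() call: per MS-FSCC 2.4.13, the FileEndOfFileInformation payload is a single 64-bit EndOfFile field, so any shorter buffer would under-run the copy. A sketch of the wire layout (hypothetical struct name, for illustration only):

	/* MS-FSCC 2.4.13 FileEndOfFileInformation: exactly 8 bytes */
	struct file_end_of_file_info {
		__le64 EndOfFile;	/* absolute new end-of-file offset */
	} __packed;
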
@@ -1759,6 +1759,7 @@ smb2_ioctl_query_info(const unsigned int xid,
                                  qi.info_type, qi.additional_information,
                                  qi.input_buffer_length,
                                  qi.output_buffer_length, buffer);
+               free_req1_func = SMB2_query_info_free;
        } else { /* unknown flags */
                cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n",
                              qi.flags);
@@ -1766,7 +1767,7 @@ smb2_ioctl_query_info(const unsigned int xid,
        }
 
        if (rc)
-               goto iqinf_exit;
+               goto free_open_req;
        smb2_set_next_command(tcon, &rqst[1]);
        smb2_set_related(&rqst[1]);
 
@@ -1777,14 +1778,14 @@ smb2_ioctl_query_info(const unsigned int xid,
        rc = SMB2_close_init(tcon, server,
                             &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
        if (rc)
-               goto iqinf_exit;
+               goto free_req_1;
        smb2_set_related(&rqst[2]);
 
        rc = compound_send_recv(xid, ses, server,
                                flags, 3, rqst,
                                resp_buftype, rsp_iov);
        if (rc)
-               goto iqinf_exit;
+               goto out;
 
        /* No need to bump num_remote_opens since handle immediately closed */
        if (qi.flags & PASSTHRU_FSCTL) {
@@ -1794,18 +1795,22 @@ smb2_ioctl_query_info(const unsigned int xid,
                        qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
                if (qi.input_buffer_length > 0 &&
                    le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
-                   > rsp_iov[1].iov_len)
-                       goto e_fault;
+                   > rsp_iov[1].iov_len) {
+                       rc = -EFAULT;
+                       goto out;
+               }
 
                if (copy_to_user(&pqi->input_buffer_length,
                                 &qi.input_buffer_length,
-                                sizeof(qi.input_buffer_length)))
-                       goto e_fault;
+                                sizeof(qi.input_buffer_length))) {
+                       rc = -EFAULT;
+                       goto out;
+               }
 
                if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
                                 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
                                 qi.input_buffer_length))
-                       goto e_fault;
+                       rc = -EFAULT;
        } else {
                pqi = (struct smb_query_info __user *)arg;
                qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
@@ -1813,28 +1818,30 @@ smb2_ioctl_query_info(const unsigned int xid,
                        qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
                if (copy_to_user(&pqi->input_buffer_length,
                                 &qi.input_buffer_length,
-                                sizeof(qi.input_buffer_length)))
-                       goto e_fault;
+                                sizeof(qi.input_buffer_length))) {
+                       rc = -EFAULT;
+                       goto out;
+               }
 
                if (copy_to_user(pqi + 1, qi_rsp->Buffer,
                                 qi.input_buffer_length))
-                       goto e_fault;
+                       rc = -EFAULT;
        }
 
- iqinf_exit:
-       cifs_small_buf_release(rqst[0].rq_iov[0].iov_base);
-       cifs_small_buf_release(rqst[1].rq_iov[0].iov_base);
-       cifs_small_buf_release(rqst[2].rq_iov[0].iov_base);
+out:
        free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
        free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
        free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
-       kfree(vars);
+       SMB2_close_free(&rqst[2]);
+free_req_1:
+       free_req1_func(&rqst[1]);
+free_open_req:
+       SMB2_open_free(&rqst[0]);
+free_output_buffer:
        kfree(buffer);
+free_vars:
+       kfree(vars);
        return rc;
-
-e_fault:
-       rc = -EFAULT;
-       goto iqinf_exit;
 }
 
 static ssize_t
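
The rework above replaces the single iqinf_exit label with a layered unwind in which each exit label frees exactly what had been allocated by the time of the failure. Reduced to a generic sketch (hypothetical helpers and sizes; not the driver code itself):

	int sketch(void)
	{
		int rc;
		char *a, *b;

		a = kmalloc(64, GFP_KERNEL);
		if (!a)
			return -ENOMEM;	/* nothing to unwind yet */

		b = kmalloc(64, GFP_KERNEL);
		if (!b) {
			rc = -ENOMEM;
			goto free_a;	/* only 'a' exists at this point */
		}

		rc = use(a, b);		/* hypothetical work function */

		kfree(b);
	free_a:
		kfree(a);
		return rc;
	}
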
@@ -2407,8 +2414,8 @@ again:
                cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc);
                goto qdf_free;
        }
-       fid->persistent_fid = le64_to_cpu(op_rsp->PersistentFileId);
-       fid->volatile_fid = le64_to_cpu(op_rsp->VolatileFileId);
+       fid->persistent_fid = op_rsp->PersistentFileId;
+       fid->volatile_fid = op_rsp->VolatileFileId;
 
        /* Anything other than ENODATA means a genuine error */
        if (rc && rc != -ENODATA) {
@@ -2488,7 +2495,7 @@ smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
                spin_unlock(&server->req_lock);
                wake_up(&server->request_q);
 
-               trace_smb3_add_credits(server->CurrentMid,
+               trace_smb3_pend_credits(server->CurrentMid,
                                server->conn_id, server->hostname, scredits,
                                le16_to_cpu(shdr->CreditRequest), in_flight);
                cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n",
@@ -2646,7 +2653,7 @@ smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
  */
 int
 smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
-                        __le16 *utf16_path, u32 desired_access,
+                        const char *path, u32 desired_access,
                         u32 class, u32 type, u32 output_len,
                         struct kvec *rsp, int *buftype,
                         struct cifs_sb_info *cifs_sb)
@@ -2664,6 +2671,14 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
        struct cifs_open_parms oparms;
        struct cifs_fid fid;
        int rc;
+       __le16 *utf16_path;
+       struct cached_fid *cfid = NULL;
+
+       if (!path)
+               path = "";
+       utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+       if (!utf16_path)
+               return -ENOMEM;
 
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
@@ -2672,6 +2687,8 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
        resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
        memset(rsp_iov, 0, sizeof(rsp_iov));
 
+       rc = open_cached_dir(xid, tcon, path, cifs_sb, &cfid);
+
        memset(&open_iov, 0, sizeof(open_iov));
        rqst[0].rq_iov = open_iov;
        rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
@@ -2693,15 +2710,29 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
        rqst[1].rq_iov = qi_iov;
        rqst[1].rq_nvec = 1;
 
-       rc = SMB2_query_info_init(tcon, server,
-                                 &rqst[1], COMPOUND_FID, COMPOUND_FID,
-                                 class, type, 0,
-                                 output_len, 0,
-                                 NULL);
+       if (cfid) {
+               rc = SMB2_query_info_init(tcon, server,
+                                         &rqst[1],
+                                         cfid->fid->persistent_fid,
+                                         cfid->fid->volatile_fid,
+                                         class, type, 0,
+                                         output_len, 0,
+                                         NULL);
+       } else {
+               rc = SMB2_query_info_init(tcon, server,
+                                         &rqst[1],
+                                         COMPOUND_FID,
+                                         COMPOUND_FID,
+                                         class, type, 0,
+                                         output_len, 0,
+                                         NULL);
+       }
        if (rc)
                goto qic_exit;
-       smb2_set_next_command(tcon, &rqst[1]);
-       smb2_set_related(&rqst[1]);
+       if (!cfid) {
+               smb2_set_next_command(tcon, &rqst[1]);
+               smb2_set_related(&rqst[1]);
+       }
 
        memset(&close_iov, 0, sizeof(close_iov));
        rqst[2].rq_iov = close_iov;
@@ -2713,9 +2744,15 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
                goto qic_exit;
        smb2_set_related(&rqst[2]);
 
-       rc = compound_send_recv(xid, ses, server,
-                               flags, 3, rqst,
-                               resp_buftype, rsp_iov);
+       if (cfid) {
+               rc = compound_send_recv(xid, ses, server,
+                                       flags, 1, &rqst[1],
+                                       &resp_buftype[1], &rsp_iov[1]);
+       } else {
+               rc = compound_send_recv(xid, ses, server,
+                                       flags, 3, rqst,
+                                       resp_buftype, rsp_iov);
+       }
        if (rc) {
                free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
                if (rc == -EREMCHG) {
@@ -2729,11 +2766,14 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
        *buftype = resp_buftype[1];
 
  qic_exit:
+       kfree(utf16_path);
        SMB2_open_free(&rqst[0]);
        SMB2_query_info_free(&rqst[1]);
        SMB2_close_free(&rqst[2]);
        free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
        free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+       if (cfid)
+               close_cached_dir(cfid);
        return rc;
 }
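
With the signature change, callers hand smb2_query_info_compound() the UTF-8 path; the helper converts to UTF-16 itself and, when open_cached_dir() yields a cached handle, sends only the query leg instead of the full open/query/close compound. A usage sketch modeled on the smb2_query_eas() caller earlier in this file (ea_buf_size stands in for the caller's output length, an assumed name):

	rc = smb2_query_info_compound(xid, tcon, path,
				      FILE_READ_EA,
				      FILE_FULL_EA_INFORMATION,
				      SMB2_O_INFO_FILE,
				      ea_buf_size,	/* assumed caller value */
				      &rsp_iov, &buftype, cifs_sb);
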
 
@@ -2743,13 +2783,12 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
 {
        struct smb2_query_info_rsp *rsp;
        struct smb2_fs_full_size_info *info = NULL;
-       __le16 utf16_path = 0; /* Null - open root of share */
        struct kvec rsp_iov = {NULL, 0};
        int buftype = CIFS_NO_BUFFER;
        int rc;
 
 
-       rc = smb2_query_info_compound(xid, tcon, &utf16_path,
+       rc = smb2_query_info_compound(xid, tcon, "",
                                      FILE_READ_ATTRIBUTES,
                                      FS_FULL_SIZE_INFORMATION,
                                      SMB2_O_INFO_FILESYSTEM,
@@ -4293,12 +4332,12 @@ static __le32
 map_oplock_to_lease(u8 oplock)
 {
        if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
-               return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
+               return SMB2_LEASE_WRITE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE;
        else if (oplock == SMB2_OPLOCK_LEVEL_II)
-               return SMB2_LEASE_READ_CACHING;
+               return SMB2_LEASE_READ_CACHING_LE;
        else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
-               return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
-                      SMB2_LEASE_WRITE_CACHING;
+               return SMB2_LEASE_HANDLE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE |
+                      SMB2_LEASE_WRITE_CACHING_LE;
        return 0;
 }
 
@@ -4360,7 +4399,7 @@ smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
        struct create_lease *lc = (struct create_lease *)buf;
 
        *epoch = 0; /* not used */
-       if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
+       if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE)
                return SMB2_OPLOCK_LEVEL_NOCHANGE;
        return le32_to_cpu(lc->lcontext.LeaseState);
 }
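
The _LE-suffixed lease constants this series switches to mirror the plain-named definitions deleted from smb2glob.h further down; the suffix makes explicit that the values are already little-endian on the wire and can be tested or OR'ed into __le32 fields without conversion. Assuming the shared header carries the same values as the removed ones:

	/* Assumption: shared-header equivalents of the removed constants */
	#define SMB2_LEASE_READ_CACHING_LE		cpu_to_le32(0x01)
	#define SMB2_LEASE_HANDLE_CACHING_LE		cpu_to_le32(0x02)
	#define SMB2_LEASE_WRITE_CACHING_LE		cpu_to_le32(0x04)
	#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE	cpu_to_le32(0x00000002)
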
@@ -4371,7 +4410,7 @@ smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
        struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
 
        *epoch = le16_to_cpu(lc->lcontext.Epoch);
-       if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
+       if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE)
                return SMB2_OPLOCK_LEVEL_NOCHANGE;
        if (lease_key)
                memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
@@ -5814,8 +5853,8 @@ struct smb_version_values smb20_values = {
        .protocol_id = SMB20_PROT_ID,
        .req_capabilities = 0, /* MBZ */
        .large_lock_type = 0,
-       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
-       .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED,
        .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
        .header_size = sizeof(struct smb2_hdr),
        .header_preamble_size = 0,
@@ -5835,8 +5874,8 @@ struct smb_version_values smb21_values = {
        .protocol_id = SMB21_PROT_ID,
        .req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
        .large_lock_type = 0,
-       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
-       .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED,
        .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
        .header_size = sizeof(struct smb2_hdr),
        .header_preamble_size = 0,
@@ -5856,8 +5895,8 @@ struct smb_version_values smb3any_values = {
        .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
        .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
-       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
-       .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED,
        .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
        .header_size = sizeof(struct smb2_hdr),
        .header_preamble_size = 0,
@@ -5877,8 +5916,8 @@ struct smb_version_values smbdefault_values = {
        .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
        .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
-       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
-       .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED,
        .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
        .header_size = sizeof(struct smb2_hdr),
        .header_preamble_size = 0,
@@ -5898,8 +5937,8 @@ struct smb_version_values smb30_values = {
        .protocol_id = SMB30_PROT_ID,
        .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
-       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
-       .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED,
        .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
        .header_size = sizeof(struct smb2_hdr),
        .header_preamble_size = 0,
@@ -5919,8 +5958,8 @@ struct smb_version_values smb302_values = {
        .protocol_id = SMB302_PROT_ID,
        .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
-       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
-       .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED,
        .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
        .header_size = sizeof(struct smb2_hdr),
        .header_preamble_size = 0,
@@ -5940,8 +5979,8 @@ struct smb_version_values smb311_values = {
        .protocol_id = SMB311_PROT_ID,
        .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
-       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
-       .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED,
        .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
        .header_size = sizeof(struct smb2_hdr),
        .header_preamble_size = 0,
index 7e7909b1ae1180f836054d51a0da6bc918943231..1b7ad0c09566876185216446aa88f0b8f96fcd28 100644 (file)
@@ -163,7 +163,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
                return 0;
 
        spin_lock(&cifs_tcp_ses_lock);
-       if (tcon->tidStatus == CifsExiting) {
+       if (tcon->status == TID_EXITING) {
                /*
                 * only tree disconnect, open, and write,
                 * (and ulogoff which does not have tcon)
@@ -2734,13 +2734,10 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
                goto err_free_req;
        }
 
-       trace_smb3_posix_mkdir_done(xid, le64_to_cpu(rsp->PersistentFileId),
-                                   tcon->tid,
-                                   ses->Suid, CREATE_NOT_FILE,
-                                   FILE_WRITE_ATTRIBUTES);
+       trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
+                                   CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES);
 
-       SMB2_close(xid, tcon, le64_to_cpu(rsp->PersistentFileId),
-                  le64_to_cpu(rsp->VolatileFileId));
+       SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
 
        /* Eventually save off posix-specific response info and timestamps */
 
@@ -3009,14 +3006,12 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
        } else if (rsp == NULL) /* unlikely to happen, but safer to check */
                goto creat_exit;
        else
-               trace_smb3_open_done(xid, le64_to_cpu(rsp->PersistentFileId),
-                                    tcon->tid,
-                                    ses->Suid, oparms->create_options,
-                                    oparms->desired_access);
+               trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
+                                    oparms->create_options, oparms->desired_access);
 
        atomic_inc(&tcon->num_remote_opens);
-       oparms->fid->persistent_fid = le64_to_cpu(rsp->PersistentFileId);
-       oparms->fid->volatile_fid = le64_to_cpu(rsp->VolatileFileId);
+       oparms->fid->persistent_fid = rsp->PersistentFileId;
+       oparms->fid->volatile_fid = rsp->VolatileFileId;
        oparms->fid->access = oparms->desired_access;
 #ifdef CONFIG_CIFS_DEBUG2
        oparms->fid->mid = le64_to_cpu(rsp->hdr.MessageId);
@@ -3313,8 +3308,8 @@ SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
        if (rc)
                return rc;
 
-       req->PersistentFileId = cpu_to_le64(persistent_fid);
-       req->VolatileFileId = cpu_to_le64(volatile_fid);
+       req->PersistentFileId = persistent_fid;
+       req->VolatileFileId = volatile_fid;
        if (query_attrs)
                req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
        else
@@ -3677,8 +3672,8 @@ SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
        if (rc)
                return rc;
 
-       req->PersistentFileId = cpu_to_le64(persistent_fid);
-       req->VolatileFileId = cpu_to_le64(volatile_fid);
+       req->PersistentFileId = persistent_fid;
+       req->VolatileFileId = volatile_fid;
        /* See note 354 of MS-SMB2, 64K max */
        req->OutputBufferLength =
                cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE);
@@ -3858,12 +3853,14 @@ void smb2_reconnect_server(struct work_struct *work)
        tcon = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
        if (!tcon) {
                resched = true;
-               list_del_init(&ses->rlist);
-               cifs_put_smb_ses(ses);
+               list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
+                       list_del_init(&ses->rlist);
+                       cifs_put_smb_ses(ses);
+               }
                goto done;
        }
 
-       tcon->tidStatus = CifsGood;
+       tcon->status = TID_GOOD;
        tcon->retry = false;
        tcon->need_reconnect = false;
 
@@ -3951,8 +3948,8 @@ SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
        if (rc)
                return rc;
 
-       req->PersistentFileId = cpu_to_le64(persistent_fid);
-       req->VolatileFileId = cpu_to_le64(volatile_fid);
+       req->PersistentFileId = persistent_fid;
+       req->VolatileFileId = volatile_fid;
 
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
@@ -4033,8 +4030,8 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
        shdr = &req->hdr;
        shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
 
-       req->PersistentFileId = cpu_to_le64(io_parms->persistent_fid);
-       req->VolatileFileId = cpu_to_le64(io_parms->volatile_fid);
+       req->PersistentFileId = io_parms->persistent_fid;
+       req->VolatileFileId = io_parms->volatile_fid;
        req->ReadChannelInfoOffset = 0; /* reserved */
        req->ReadChannelInfoLength = 0; /* reserved */
        req->Channel = 0; /* reserved */
@@ -4094,8 +4091,8 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
                         */
                        shdr->SessionId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
                        shdr->Id.SyncId.TreeId = cpu_to_le32(0xFFFFFFFF);
-                       req->PersistentFileId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
-                       req->VolatileFileId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
+                       req->PersistentFileId = (u64)-1;
+                       req->VolatileFileId = (u64)-1;
                }
        }
        if (remaining_bytes > io_parms->length)
@@ -4307,21 +4304,19 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
                        cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
                        cifs_dbg(VFS, "Send error in read = %d\n", rc);
                        trace_smb3_read_err(xid,
-                                           le64_to_cpu(req->PersistentFileId),
+                                           req->PersistentFileId,
                                            io_parms->tcon->tid, ses->Suid,
                                            io_parms->offset, io_parms->length,
                                            rc);
                } else
-                       trace_smb3_read_done(xid,
-                                            le64_to_cpu(req->PersistentFileId),
-                                            io_parms->tcon->tid, ses->Suid,
-                                            io_parms->offset, 0);
+                       trace_smb3_read_done(xid, req->PersistentFileId, io_parms->tcon->tid,
+                                            ses->Suid, io_parms->offset, 0);
                free_rsp_buf(resp_buftype, rsp_iov.iov_base);
                cifs_small_buf_release(req);
                return rc == -ENODATA ? 0 : rc;
        } else
                trace_smb3_read_done(xid,
-                                    le64_to_cpu(req->PersistentFileId),
+                                   req->PersistentFileId,
                                    io_parms->tcon->tid, ses->Suid,
                                    io_parms->offset, io_parms->length);
 
@@ -4463,8 +4458,8 @@ smb2_async_writev(struct cifs_writedata *wdata,
        shdr = (struct smb2_hdr *)req;
        shdr->Id.SyncId.ProcessId = cpu_to_le32(wdata->cfile->pid);
 
-       req->PersistentFileId = cpu_to_le64(wdata->cfile->fid.persistent_fid);
-       req->VolatileFileId = cpu_to_le64(wdata->cfile->fid.volatile_fid);
+       req->PersistentFileId = wdata->cfile->fid.persistent_fid;
+       req->VolatileFileId = wdata->cfile->fid.volatile_fid;
        req->WriteChannelInfoOffset = 0;
        req->WriteChannelInfoLength = 0;
        req->Channel = 0;
@@ -4562,7 +4557,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
 
        if (rc) {
                trace_smb3_write_err(0 /* no xid */,
-                                    le64_to_cpu(req->PersistentFileId),
+                                    req->PersistentFileId,
                                     tcon->tid, tcon->ses->Suid, wdata->offset,
                                     wdata->bytes, rc);
                kref_put(&wdata->refcount, release);
@@ -4615,8 +4610,8 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
 
        req->hdr.Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
 
-       req->PersistentFileId = cpu_to_le64(io_parms->persistent_fid);
-       req->VolatileFileId = cpu_to_le64(io_parms->volatile_fid);
+       req->PersistentFileId = io_parms->persistent_fid;
+       req->VolatileFileId = io_parms->volatile_fid;
        req->WriteChannelInfoOffset = 0;
        req->WriteChannelInfoLength = 0;
        req->Channel = 0;
@@ -4645,7 +4640,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
 
        if (rc) {
                trace_smb3_write_err(xid,
-                                    le64_to_cpu(req->PersistentFileId),
+                                    req->PersistentFileId,
                                     io_parms->tcon->tid,
                                     io_parms->tcon->ses->Suid,
                                     io_parms->offset, io_parms->length, rc);
@@ -4654,7 +4649,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
        } else {
                *nbytes = le32_to_cpu(rsp->DataLength);
                trace_smb3_write_done(xid,
-                                     le64_to_cpu(req->PersistentFileId),
+                                     req->PersistentFileId,
                                      io_parms->tcon->tid,
                                      io_parms->tcon->ses->Suid,
                                      io_parms->offset, *nbytes);
index 33cfd0a1adf12ca43bde5261184f1956d417b0bc..d8c4388b190d8eaabd16c99d83b1d68cc54a0a6d 100644 (file)
@@ -56,16 +56,6 @@ struct smb2_rdma_crypto_transform {
 
 #define COMPOUND_FID 0xFFFFFFFFFFFFFFFFULL
 
-#define SMB2_ERROR_STRUCTURE_SIZE2 cpu_to_le16(9)
-
-struct smb2_err_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize;
-       __le16 Reserved; /* MBZ */
-       __le32 ByteCount;  /* even if zero, at least one byte follows */
-       __u8   ErrorData[1];  /* variable length */
-} __packed;
-
 #define SYMLINK_ERROR_TAG 0x4c4d5953
 
 struct smb2_symlink_err_rsp {
@@ -139,47 +129,6 @@ struct share_redirect_error_context_rsp {
 #define SMB2_LEASE_HANDLE_CACHING_HE   0x02
 #define SMB2_LEASE_WRITE_CACHING_HE    0x04
 
-#define SMB2_LEASE_NONE                        cpu_to_le32(0x00)
-#define SMB2_LEASE_READ_CACHING                cpu_to_le32(0x01)
-#define SMB2_LEASE_HANDLE_CACHING      cpu_to_le32(0x02)
-#define SMB2_LEASE_WRITE_CACHING       cpu_to_le32(0x04)
-
-#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS cpu_to_le32(0x00000002)
-#define SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET cpu_to_le32(0x00000004)
-
-#define SMB2_LEASE_KEY_SIZE 16
-
-struct lease_context {
-       u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
-       __le32 LeaseState;
-       __le32 LeaseFlags;
-       __le64 LeaseDuration;
-} __packed;
-
-struct lease_context_v2 {
-       u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
-       __le32 LeaseState;
-       __le32 LeaseFlags;
-       __le64 LeaseDuration;
-       __le64 ParentLeaseKeyLow;
-       __le64 ParentLeaseKeyHigh;
-       __le16 Epoch;
-       __le16 Reserved;
-} __packed;
-
-struct create_lease {
-       struct create_context ccontext;
-       __u8   Name[8];
-       struct lease_context lcontext;
-} __packed;
-
-struct create_lease_v2 {
-       struct create_context ccontext;
-       __u8   Name[8];
-       struct lease_context_v2 lcontext;
-       __u8   Pad[4];
-} __packed;
-
 struct create_durable {
        struct create_context ccontext;
        __u8   Name[8];
@@ -192,13 +141,6 @@ struct create_durable {
        } Data;
 } __packed;
 
-struct create_posix {
-       struct create_context ccontext;
-       __u8    Name[16];
-       __le32  Mode;
-       __u32   Reserved;
-} __packed;
-
 /* See MS-SMB2 2.2.13.2.11 */
 /* Flags */
 #define SMB2_DHANDLE_FLAG_PERSISTENT   0x00000002
@@ -287,12 +229,6 @@ struct copychunk_ioctl {
        __u32 Reserved2;
 } __packed;
 
-/* this goes in the ioctl buffer when doing FSCTL_SET_ZERO_DATA */
-struct file_zero_data_information {
-       __le64  FileOffset;
-       __le64  BeyondFinalZero;
-} __packed;
-
 struct copychunk_ioctl_rsp {
        __le32 ChunksWritten;
        __le32 ChunkBytesWritten;
@@ -338,11 +274,6 @@ struct fsctl_get_integrity_information_rsp {
        __le32  ClusterSizeInBytes;
 } __packed;
 
-struct file_allocated_range_buffer {
-       __le64  file_offset;
-       __le64  length;
-} __packed;
-
 /* Integrity ChecksumAlgorithm choices for above */
 #define        CHECKSUM_TYPE_NONE      0x0000
 #define        CHECKSUM_TYPE_CRC64     0x0002
@@ -351,53 +282,6 @@ struct file_allocated_range_buffer {
 /* Integrity flags for above */
 #define FSCTL_INTEGRITY_FLAG_CHECKSUM_ENFORCEMENT_OFF  0x00000001
 
-/* Reparse structures - see MS-FSCC 2.1.2 */
-
-/* struct fsctl_reparse_info_req is empty, only response structs (see below) */
-
-struct reparse_data_buffer {
-       __le32  ReparseTag;
-       __le16  ReparseDataLength;
-       __u16   Reserved;
-       __u8    DataBuffer[]; /* Variable Length */
-} __packed;
-
-struct reparse_guid_data_buffer {
-       __le32  ReparseTag;
-       __le16  ReparseDataLength;
-       __u16   Reserved;
-       __u8    ReparseGuid[16];
-       __u8    DataBuffer[]; /* Variable Length */
-} __packed;
-
-struct reparse_mount_point_data_buffer {
-       __le32  ReparseTag;
-       __le16  ReparseDataLength;
-       __u16   Reserved;
-       __le16  SubstituteNameOffset;
-       __le16  SubstituteNameLength;
-       __le16  PrintNameOffset;
-       __le16  PrintNameLength;
-       __u8    PathBuffer[]; /* Variable Length */
-} __packed;
-
-#define SYMLINK_FLAG_RELATIVE 0x00000001
-
-struct reparse_symlink_data_buffer {
-       __le32  ReparseTag;
-       __le16  ReparseDataLength;
-       __u16   Reserved;
-       __le16  SubstituteNameOffset;
-       __le16  SubstituteNameLength;
-       __le16  PrintNameOffset;
-       __le16  PrintNameLength;
-       __le32  Flags;
-       __u8    PathBuffer[]; /* Variable Length */
-} __packed;
-
-/* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */
-
-
 /* See MS-DFSC 2.2.2 */
 struct fsctl_get_dfs_referral_req {
        __le16 MaxReferralLevel;
@@ -413,22 +297,6 @@ struct network_resiliency_req {
 } __packed;
 /* There is no buffer for the response ie no struct network_resiliency_rsp */
 
-
-struct validate_negotiate_info_req {
-       __le32 Capabilities;
-       __u8   Guid[SMB2_CLIENT_GUID_SIZE];
-       __le16 SecurityMode;
-       __le16 DialectCount;
-       __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */
-} __packed;
-
-struct validate_negotiate_info_rsp {
-       __le32 Capabilities;
-       __u8   Guid[SMB2_CLIENT_GUID_SIZE];
-       __le16 SecurityMode;
-       __le16 Dialect; /* Dialect in use for the connection */
-} __packed;
-
 #define RSS_CAPABLE    cpu_to_le32(0x00000001)
 #define RDMA_CAPABLE   cpu_to_le32(0x00000002)
 
@@ -464,14 +332,6 @@ struct compress_ioctl {
        __le16 CompressionState; /* See cifspdu.h for possible flag values */
 } __packed;
 
-struct duplicate_extents_to_file {
-       __u64 PersistentFileHandle; /* source file handle, opaque endianness */
-       __u64 VolatileFileHandle;
-       __le64 SourceFileOffset;
-       __le64 TargetFileOffset;
-       __le64 ByteCount;  /* Bytes to be copied */
-} __packed;
-
 /*
  * Maximum number of iovs we need for an ioctl request.
  * [0] : struct smb2_ioctl_req
@@ -479,370 +339,11 @@ struct duplicate_extents_to_file {
  */
 #define SMB2_IOCTL_IOV_SIZE 2
 
-struct smb2_ioctl_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize;   /* Must be 57 */
-       __u16 Reserved;
-       __le32 CtlCode;
-       __u64  PersistentFileId; /* opaque endianness */
-       __u64  VolatileFileId; /* opaque endianness */
-       __le32 InputOffset;
-       __le32 InputCount;
-       __le32 MaxInputResponse;
-       __le32 OutputOffset;
-       __le32 OutputCount;
-       __le32 MaxOutputResponse;
-       __le32 Flags;
-       __u32  Reserved2;
-       __u8   Buffer[];
-} __packed;
-
-struct smb2_ioctl_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize;   /* Must be 57 */
-       __u16 Reserved;
-       __le32 CtlCode;
-       __u64  PersistentFileId; /* opaque endianness */
-       __u64  VolatileFileId; /* opaque endianness */
-       __le32 InputOffset;
-       __le32 InputCount;
-       __le32 OutputOffset;
-       __le32 OutputCount;
-       __le32 Flags;
-       __u32  Reserved2;
-       /* char * buffer[] */
-} __packed;
-
-#define SMB2_LOCKFLAG_SHARED_LOCK      0x0001
-#define SMB2_LOCKFLAG_EXCLUSIVE_LOCK   0x0002
-#define SMB2_LOCKFLAG_UNLOCK           0x0004
-#define SMB2_LOCKFLAG_FAIL_IMMEDIATELY 0x0010
-
-struct smb2_lock_element {
-       __le64 Offset;
-       __le64 Length;
-       __le32 Flags;
-       __le32 Reserved;
-} __packed;
-
-struct smb2_lock_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 48 */
-       __le16 LockCount;
-       /*
-        * The least significant four bits are the index, the other 28 bits are
-        * the lock sequence number (0 to 64). See MS-SMB2 2.2.26
-        */
-       __le32 LockSequenceNumber;
-       __u64  PersistentFileId; /* opaque endianness */
-       __u64  VolatileFileId; /* opaque endianness */
-       /* Followed by at least one */
-       struct smb2_lock_element locks[1];
-} __packed;
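
The comment in the removed smb2_lock_req above encodes a small bit-packing rule: the least significant four bits of LockSequenceNumber index a lock-sequence bucket, and the remaining 28 bits carry the sequence value (MS-SMB2 2.2.26). A userspace sketch of one packing consistent with that comment (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Index in bits 0-3, sequence in bits 4-31, per the comment above. */
static uint32_t pack_lock_seq(uint32_t bucket_index, uint32_t sequence)
{
	return (sequence << 4) | (bucket_index & 0xF);
}

int main(void)
{
	uint32_t lsn = pack_lock_seq(9, 37);

	printf("LockSequenceNumber=%#x index=%u seq=%u\n",
	       lsn, lsn & 0xF, lsn >> 4);
	return 0;
}
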
-
-struct smb2_lock_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 4 */
-       __le16 Reserved;
-} __packed;
-
-struct smb2_echo_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize;   /* Must be 4 */
-       __u16  Reserved;
-} __packed;
-
-struct smb2_echo_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize;   /* Must be 4 */
-       __u16  Reserved;
-} __packed;
-
-/* search (query_directory) Flags field */
-#define SMB2_RESTART_SCANS             0x01
-#define SMB2_RETURN_SINGLE_ENTRY       0x02
-#define SMB2_INDEX_SPECIFIED           0x04
-#define SMB2_REOPEN                    0x10
-
-#define SMB2_QUERY_DIRECTORY_IOV_SIZE 2
-
-/*
- * Valid FileInformation classes.
- *
- * Note that these are a subset of the (file) QUERY_INFO levels defined
- * later in this file (but since QUERY_DIRECTORY uses equivalent numbers
- * we do not redefine them here)
- *
- * FileDirectoryInformation            0x01
- * FileFullDirectoryInformation                0x02
- * FileIdFullDirectoryInformation      0x26
- * FileBothDirectoryInformation                0x03
- * FileIdBothDirectoryInformation      0x25
- * FileNamesInformation                        0x0C
- * FileIdExtdDirectoryInformation      0x3C
- */
-
-struct smb2_query_directory_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 33 */
-       __u8   FileInformationClass;
-       __u8   Flags;
-       __le32 FileIndex;
-       __u64  PersistentFileId; /* opaque endianness */
-       __u64  VolatileFileId; /* opaque endianness */
-       __le16 FileNameOffset;
-       __le16 FileNameLength;
-       __le32 OutputBufferLength;
-       __u8   Buffer[1];
-} __packed;
-
-struct smb2_query_directory_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 9 */
-       __le16 OutputBufferOffset;
-       __le32 OutputBufferLength;
-       __u8   Buffer[1];
-} __packed;
-
-/* Possible InfoType values */
-#define SMB2_O_INFO_FILE       0x01
-#define SMB2_O_INFO_FILESYSTEM 0x02
-#define SMB2_O_INFO_SECURITY   0x03
-#define SMB2_O_INFO_QUOTA      0x04
-
-/* Security info type additionalinfo flags. See MS-SMB2 (2.2.37) or MS-DTYP */
-#define OWNER_SECINFO   0x00000001
-#define GROUP_SECINFO   0x00000002
-#define DACL_SECINFO   0x00000004
-#define SACL_SECINFO   0x00000008
-#define LABEL_SECINFO   0x00000010
-#define ATTRIBUTE_SECINFO   0x00000020
-#define SCOPE_SECINFO   0x00000040
-#define BACKUP_SECINFO   0x00010000
-#define UNPROTECTED_SACL_SECINFO   0x10000000
-#define UNPROTECTED_DACL_SECINFO   0x20000000
-#define PROTECTED_SACL_SECINFO   0x40000000
-#define PROTECTED_DACL_SECINFO   0x80000000
-
-/* Flags used for FileFullEAinfo */
-#define SL_RESTART_SCAN                0x00000001
-#define SL_RETURN_SINGLE_ENTRY 0x00000002
-#define SL_INDEX_SPECIFIED     0x00000004
-
-struct smb2_query_info_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 41 */
-       __u8   InfoType;
-       __u8   FileInfoClass;
-       __le32 OutputBufferLength;
-       __le16 InputBufferOffset;
-       __u16  Reserved;
-       __le32 InputBufferLength;
-       __le32 AdditionalInformation;
-       __le32 Flags;
-       __u64  PersistentFileId; /* opaque endianness */
-       __u64  VolatileFileId; /* opaque endianness */
-       __u8   Buffer[1];
-} __packed;
-
-struct smb2_query_info_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 9 */
-       __le16 OutputBufferOffset;
-       __le32 OutputBufferLength;
-       __u8   Buffer[1];
-} __packed;
-
-/*
- * Maximum number of iovs we need for a set-info request.
- * The largest one is rename/hardlink
- * [0] : struct smb2_set_info_req + smb2_file_[rename|link]_info
- * [1] : path
- * [2] : compound padding
- */
-#define SMB2_SET_INFO_IOV_SIZE 3
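
The removed SMB2_SET_INFO_IOV_SIZE comment describes the worst-case scatter list for a set-info request: the fixed request plus rename/link info in iov[0], the target path in iov[1], and compound-chaining padding in iov[2]. A userspace sketch of assembling such a three-part vector (buffer sizes and names are illustrative, not the cifs code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

int main(void)
{
	uint8_t fixed_part[64];              /* stand-in for req + rename info */
	const char *path = "d\0i\0r\0";      /* stand-in UTF-16LE path bytes */
	static const uint8_t pad[8];         /* compound 8-byte alignment pad */

	memset(fixed_part, 0, sizeof(fixed_part));

	struct iovec iov[3] = {
		{ .iov_base = fixed_part,   .iov_len = sizeof(fixed_part) },
		{ .iov_base = (void *)path, .iov_len = 6 },
		/* pad the compound chain out to the next 8-byte boundary */
		{ .iov_base = (void *)pad,
		  .iov_len = (8 - (sizeof(fixed_part) + 6) % 8) % 8 },
	};

	size_t total = 0;
	for (int i = 0; i < 3; i++)
		total += iov[i].iov_len;
	printf("iovs=3 total=%zu bytes\n", total);
	return 0;
}
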
-
-struct smb2_set_info_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 33 */
-       __u8   InfoType;
-       __u8   FileInfoClass;
-       __le32 BufferLength;
-       __le16 BufferOffset;
-       __u16  Reserved;
-       __le32 AdditionalInformation;
-       __u64  PersistentFileId; /* opaque endianness */
-       __u64  VolatileFileId; /* opaque endianness */
-       __u8   Buffer[1];
-} __packed;
-
-struct smb2_set_info_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 2 */
-} __packed;
-
-struct smb2_oplock_break {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 24 */
-       __u8   OplockLevel;
-       __u8   Reserved;
-       __le32 Reserved2;
-       __u64  PersistentFid;
-       __u64  VolatileFid;
-} __packed;
-
-#define SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED cpu_to_le32(0x01)
-
-struct smb2_lease_break {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 44 */
-       __le16 Epoch;
-       __le32 Flags;
-       __u8   LeaseKey[16];
-       __le32 CurrentLeaseState;
-       __le32 NewLeaseState;
-       __le32 BreakReason;
-       __le32 AccessMaskHint;
-       __le32 ShareMaskHint;
-} __packed;
-
-struct smb2_lease_ack {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 36 */
-       __le16 Reserved;
-       __le32 Flags;
-       __u8   LeaseKey[16];
-       __le32 LeaseState;
-       __le64 LeaseDuration;
-} __packed;
-
 /*
- *     PDU infolevel structure definitions
+ *     PDU query infolevel structure definitions
  *     BB consider moving to a different header
  */
 
-/* File System Information Classes */
-#define FS_VOLUME_INFORMATION          1 /* Query */
-#define FS_LABEL_INFORMATION           2 /* Local only */
-#define FS_SIZE_INFORMATION            3 /* Query */
-#define FS_DEVICE_INFORMATION          4 /* Query */
-#define FS_ATTRIBUTE_INFORMATION       5 /* Query */
-#define FS_CONTROL_INFORMATION         6 /* Query, Set */
-#define FS_FULL_SIZE_INFORMATION       7 /* Query */
-#define FS_OBJECT_ID_INFORMATION       8 /* Query, Set */
-#define FS_DRIVER_PATH_INFORMATION     9 /* Local only */
-#define FS_VOLUME_FLAGS_INFORMATION    10 /* Local only */
-#define FS_SECTOR_SIZE_INFORMATION     11 /* SMB3 or later. Query */
-#define FS_POSIX_INFORMATION           100 /* SMB3.1.1 POSIX. Query */
-
-struct smb2_fs_full_size_info {
-       __le64 TotalAllocationUnits;
-       __le64 CallerAvailableAllocationUnits;
-       __le64 ActualAvailableAllocationUnits;
-       __le32 SectorsPerAllocationUnit;
-       __le32 BytesPerSector;
-} __packed;
-
-#define SSINFO_FLAGS_ALIGNED_DEVICE            0x00000001
-#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002
-#define SSINFO_FLAGS_NO_SEEK_PENALTY           0x00000004
-#define SSINFO_FLAGS_TRIM_ENABLED              0x00000008
-
-/* sector size info struct */
-struct smb3_fs_ss_info {
-       __le32 LogicalBytesPerSector;
-       __le32 PhysicalBytesPerSectorForAtomicity;
-       __le32 PhysicalBytesPerSectorForPerf;
-       __le32 FileSystemEffectivePhysicalBytesPerSectorForAtomicity;
-       __le32 Flags;
-       __le32 ByteOffsetForSectorAlignment;
-       __le32 ByteOffsetForPartitionAlignment;
-} __packed;
-
-/* volume info struct - see MS-FSCC 2.5.9 */
-#define MAX_VOL_LABEL_LEN      32
-struct smb3_fs_vol_info {
-       __le64  VolumeCreationTime;
-       __u32   VolumeSerialNumber;
-       __le32  VolumeLabelLength; /* includes trailing null */
-       __u8    SupportsObjects; /* True if the fs (e.g. NTFS) supports objects */
-       __u8    Reserved;
-       __u8    VolumeLabel[]; /* variable len */
-} __packed;
-
-/* partial list of QUERY INFO levels */
-#define FILE_DIRECTORY_INFORMATION     1
-#define FILE_FULL_DIRECTORY_INFORMATION 2
-#define FILE_BOTH_DIRECTORY_INFORMATION 3
-#define FILE_BASIC_INFORMATION         4
-#define FILE_STANDARD_INFORMATION      5
-#define FILE_INTERNAL_INFORMATION      6
-#define FILE_EA_INFORMATION            7
-#define FILE_ACCESS_INFORMATION                8
-#define FILE_NAME_INFORMATION          9
-#define FILE_RENAME_INFORMATION                10
-#define FILE_LINK_INFORMATION          11
-#define FILE_NAMES_INFORMATION         12
-#define FILE_DISPOSITION_INFORMATION   13
-#define FILE_POSITION_INFORMATION      14
-#define FILE_FULL_EA_INFORMATION       15
-#define FILE_MODE_INFORMATION          16
-#define FILE_ALIGNMENT_INFORMATION     17
-#define FILE_ALL_INFORMATION           18
-#define FILE_ALLOCATION_INFORMATION    19
-#define FILE_END_OF_FILE_INFORMATION   20
-#define FILE_ALTERNATE_NAME_INFORMATION 21
-#define FILE_STREAM_INFORMATION                22
-#define FILE_PIPE_INFORMATION          23
-#define FILE_PIPE_LOCAL_INFORMATION    24
-#define FILE_PIPE_REMOTE_INFORMATION   25
-#define FILE_MAILSLOT_QUERY_INFORMATION 26
-#define FILE_MAILSLOT_SET_INFORMATION  27
-#define FILE_COMPRESSION_INFORMATION   28
-#define FILE_OBJECT_ID_INFORMATION     29
-/* Number 30 not defined in documents */
-#define FILE_MOVE_CLUSTER_INFORMATION  31
-#define FILE_QUOTA_INFORMATION         32
-#define FILE_REPARSE_POINT_INFORMATION 33
-#define FILE_NETWORK_OPEN_INFORMATION  34
-#define FILE_ATTRIBUTE_TAG_INFORMATION 35
-#define FILE_TRACKING_INFORMATION      36
-#define FILEID_BOTH_DIRECTORY_INFORMATION 37
-#define FILEID_FULL_DIRECTORY_INFORMATION 38
-#define FILE_VALID_DATA_LENGTH_INFORMATION 39
-#define FILE_SHORT_NAME_INFORMATION    40
-#define FILE_SFIO_RESERVE_INFORMATION  44
-#define FILE_SFIO_VOLUME_INFORMATION   45
-#define FILE_HARD_LINK_INFORMATION     46
-#define FILE_NORMALIZED_NAME_INFORMATION 48
-#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50
-#define FILE_STANDARD_LINK_INFORMATION 54
-#define FILE_ID_INFORMATION            59
-#define FILE_ID_EXTD_DIRECTORY_INFORMATION 60
-
-struct smb2_file_internal_info {
-       __le64 IndexNumber;
-} __packed; /* level 6 Query */
-
-struct smb2_file_rename_info { /* encoding of request for level 10 */
-       __u8   ReplaceIfExists; /* 1 = replace existing target with new */
-                               /* 0 = fail if target already exists */
-       __u8   Reserved[7];
-       __u64  RootDirectory;  /* MBZ for network operations (spec doesn't say why) */
-       __le32 FileNameLength;
-       char   FileName[];     /* New name to be assigned */
-       /* padding - overall struct size must be >= 24 so filename + pad >= 6 */
-} __packed; /* level 10 Set */
-
-struct smb2_file_link_info { /* encoding of request for level 11 */
-       __u8   ReplaceIfExists; /* 1 = replace existing link with new */
-                               /* 0 = fail if link already exists */
-       __u8   Reserved[7];
-       __u64  RootDirectory;  /* MBZ for network operations (spec doesn't say why) */
-       __le32 FileNameLength;
-       char   FileName[];     /* Name to be assigned to new link */
-} __packed; /* level 11 Set */
-
 struct smb2_file_full_ea_info { /* encoding of response for level 15 */
        __le32 next_entry_offset;
        __u8   flags;
@@ -851,38 +352,6 @@ struct smb2_file_full_ea_info { /* encoding of response for level 15 */
        char   ea_data[]; /* \0 terminated name plus value */
 } __packed; /* level 15 Set */
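
smb2_file_full_ea_info above is a self-relative list: next_entry_offset points at the following entry (zero terminates), the name is NUL-terminated, and the value bytes follow it immediately. A userspace sketch of walking such a blob, assuming a well-formed buffer (real code must bounds-check each step):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ea_entry {                /* wire layout sketch; kernel struct is __packed */
	uint32_t next_entry_offset;  /* 0 terminates the list */
	uint8_t  flags;
	uint8_t  ea_name_length;     /* name length, excluding the NUL */
	uint16_t ea_value_length;
	char     ea_data[];          /* NUL-terminated name, then value bytes */
};

static void walk_eas(const uint8_t *buf)
{
	for (;;) {
		const struct ea_entry *ea = (const struct ea_entry *)buf;

		printf("name=%s value_len=%u\n", ea->ea_data, ea->ea_value_length);
		if (!ea->next_entry_offset)   /* last entry */
			break;
		buf += ea->next_entry_offset; /* real code must validate this hop */
	}
}

int main(void)
{
	/* One entry: name "user.x" plus its NUL, then the value "1". */
	static uint8_t buf[sizeof(struct ea_entry) + 8];
	struct ea_entry *ea = (struct ea_entry *)buf;

	ea->ea_name_length = 6;
	ea->ea_value_length = 1;
	memcpy(ea->ea_data, "user.x\0" "1", 8);
	walk_eas(buf);
	return 0;
}
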
 
-/*
- * This level 18, although with struct with same name is different from cifs
- * level 0x107. Level 0x107 has an extra u64 between AccessFlags and
- * CurrentByteOffset.
- */
-struct smb2_file_all_info { /* data block encoding of response to level 18 */
-       __le64 CreationTime;    /* Beginning of FILE_BASIC_INFO equivalent */
-       __le64 LastAccessTime;
-       __le64 LastWriteTime;
-       __le64 ChangeTime;
-       __le32 Attributes;
-       __u32  Pad1;            /* End of FILE_BASIC_INFO equivalent */
-       __le64 AllocationSize;  /* Beginning of FILE_STANDARD_INFO equivalent */
-       __le64 EndOfFile;       /* size, i.e. offset of first free byte in file */
-       __le32 NumberOfLinks;   /* hard links */
-       __u8   DeletePending;
-       __u8   Directory;
-       __u16  Pad2;            /* End of FILE_STANDARD_INFO equivalent */
-       __le64 IndexNumber;
-       __le32 EASize;
-       __le32 AccessFlags;
-       __le64 CurrentByteOffset;
-       __le32 Mode;
-       __le32 AlignmentRequirement;
-       __le32 FileNameLength;
-       char   FileName[1];
-} __packed; /* level 18 Query */
-
-struct smb2_file_eof_info { /* encoding of request for level 10 */
-       __le64 EndOfFile; /* new end of file value */
-} __packed; /* level 20 Set */
-
 struct smb2_file_reparse_point_info {
        __le64 IndexNumber;
        __le32 Tag;
@@ -935,6 +404,8 @@ struct create_posix_rsp {
        struct cifs_sid group; /* var-sized on the wire */
 } __packed;
 
+#define SMB2_QUERY_DIRECTORY_IOV_SIZE 2
+
 /*
  * SMB2-only POSIX info level for query dir
  *
@@ -966,31 +437,6 @@ struct smb2_posix_info {
         */
 } __packed;
 
-/* Level 100 query info */
-struct smb311_posix_qinfo {
-       __le64 CreationTime;
-       __le64 LastAccessTime;
-       __le64 LastWriteTime;
-       __le64 ChangeTime;
-       __le64 EndOfFile;
-       __le64 AllocationSize;
-       __le32 DosAttributes;
-       __le64 Inode;
-       __le32 DeviceId;
-       __le32 Zero;
-       /* beginning of POSIX Create Context Response */
-       __le32 HardLinks;
-       __le32 ReparseTag;
-       __le32 Mode;
-       u8     Sids[];
-       /*
-        * var sized owner SID
-        * var sized group SID
-        * le32 filenamelength
-        * u8  filename[]
-        */
-} __packed;
-
 /*
  * Parsed version of the above struct. Allows direct access to the
  * variable length fields
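
The tail of smb2_posix_info is variable length: an owner SID, a group SID, a 32-bit name length, then the name bytes, which is why the code keeps a parsed companion struct holding direct pointers. A userspace sketch of one such parse; the SID sizing rule used here (revision byte, subauthority count byte, 6 authority bytes, 4 bytes per subauthority) is an assumption about the wire layout, not taken from this patch:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed MS-DTYP-style SID: size follows from the subauthority count. */
static size_t sid_size(const uint8_t *sid)
{
	return 8 + 4 * (size_t)sid[1];
}

struct parsed_tail {
	const uint8_t *owner, *group, *name;
	uint32_t name_len;
};

static void parse_tail(const uint8_t *p, struct parsed_tail *out)
{
	out->owner = p;
	p += sid_size(p);
	out->group = p;
	p += sid_size(p);
	memcpy(&out->name_len, p, 4); /* le32 on the wire; host-endian here */
	p += 4;
	out->name = p;
}

int main(void)
{
	/* Two minimal SIDs with zero subauthorities, then the name "a". */
	uint8_t buf[8 + 8 + 4 + 1] = { 0 };
	struct parsed_tail t;

	buf[16] = 1; /* name_len = 1 */
	buf[20] = 'a';
	parse_tail(buf, &t);
	printf("name_len=%u first=%c\n", t.name_len, t.name[0]);
	return 0;
}
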
index 4a7062fd1c26b538dd46a2ba46c76a52b1b2ed9c..a69f1eed1cfe545f78cf5832819165cda6598214 100644 (file)
@@ -283,7 +283,7 @@ extern int smb311_update_preauth_hash(struct cifs_ses *ses,
                                      struct kvec *iov, int nvec);
 extern int smb2_query_info_compound(const unsigned int xid,
                                    struct cifs_tcon *tcon,
-                                   __le16 *utf16_path, u32 desired_access,
+                                   const char *path, u32 desired_access,
                                    u32 class, u32 type, u32 output_len,
                                    struct kvec *rsp, int *buftype,
                                    struct cifs_sb_info *cifs_sb);
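
The smb2_query_info_compound() prototype now takes the plain path instead of a pre-converted __le16 *utf16_path, which implies the UTF-16 conversion moves inside the helper so call sites no longer duplicate it. A sketch of that refactor pattern with hypothetical names (to_utf16() and its ASCII-only conversion are illustrative, not cifs functions):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical conversion helper (ASCII-only for brevity). */
static uint16_t *to_utf16(const char *s, size_t *out_len)
{
	size_t n = strlen(s);
	uint16_t *w = malloc((n + 1) * sizeof(*w));

	if (!w)
		return NULL;
	for (size_t i = 0; i <= n; i++)
		w[i] = (uint8_t)s[i];
	*out_len = n;
	return w;
}

/* The helper owns the conversion; callers just pass the plain path. */
static int query_info_compound(const char *path)
{
	size_t len;
	uint16_t *utf16_path = to_utf16(path, &len);

	if (!utf16_path)
		return -1;
	printf("querying \"%s\" (%zu UTF-16 units)\n", path, len);
	free(utf16_path);
	return 0;
}

int main(void)
{
	return query_info_compound("dir/file");
}
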
index 6cecf302dcfdc2ad0fe15c3affc34d125e38bf1d..bc279616c513a86f14c002b9b29c9e34069820b9 100644 (file)
@@ -1006,6 +1006,13 @@ DEFINE_SMB3_CREDIT_EVENT(credit_timeout);
 DEFINE_SMB3_CREDIT_EVENT(insufficient_credits);
 DEFINE_SMB3_CREDIT_EVENT(too_many_credits);
 DEFINE_SMB3_CREDIT_EVENT(add_credits);
+DEFINE_SMB3_CREDIT_EVENT(adj_credits);
+DEFINE_SMB3_CREDIT_EVENT(hdr_credits);
+DEFINE_SMB3_CREDIT_EVENT(nblk_credits);
+DEFINE_SMB3_CREDIT_EVENT(pend_credits);
+DEFINE_SMB3_CREDIT_EVENT(wait_credits);
+DEFINE_SMB3_CREDIT_EVENT(waitff_credits);
+DEFINE_SMB3_CREDIT_EVENT(overflow_credits);
 DEFINE_SMB3_CREDIT_EVENT(set_credits);
 
 #endif /* _CIFS_TRACE_H */
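
The new DEFINE_SMB3_CREDIT_EVENT() lines appear to instantiate additional tracepoints on the existing credit event class: one shared record format, many event names, so each credit-adjustment site (see the transport.c hunks below switching to trace_smb3_nblk_credits and trace_smb3_waitff_credits) shows up under its own name in the trace stream. A userspace sketch of the one-class/many-names pattern:

#include <stdio.h>

/* The shared "class": one formatting routine for every event name. */
static void credit_event_class(const char *event, unsigned long mid,
			       int credits, int change, int in_flight)
{
	printf("%s: mid=%lu credits=%d change=%d in_flight=%d\n",
	       event, mid, credits, change, in_flight);
}

#define DEFINE_CREDIT_EVENT(name)                                        \
	static void trace_##name(unsigned long mid, int credits,         \
				 int change, int in_flight)               \
	{                                                                 \
		credit_event_class(#name, mid, credits, change, in_flight); \
	}

DEFINE_CREDIT_EVENT(nblk_credits)
DEFINE_CREDIT_EVENT(waitff_credits)

int main(void)
{
	/* Distinct names make it obvious which wait path adjusted credits. */
	trace_nblk_credits(7, 1, -1, 3);
	trace_waitff_credits(8, 10, -2, 2);
	return 0;
}
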
index eeb1a699bd6f2360198f72eb7642887261feded1..d9d1c353bafc7daa2d5d09a87227075aef64680a 100644 (file)
@@ -542,7 +542,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                in_flight = server->in_flight;
                spin_unlock(&server->req_lock);
 
-               trace_smb3_add_credits(server->CurrentMid,
+               trace_smb3_nblk_credits(server->CurrentMid,
                                server->conn_id, server->hostname, scredits, -1, in_flight);
                cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                                __func__, 1, scredits);
@@ -648,7 +648,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                        in_flight = server->in_flight;
                        spin_unlock(&server->req_lock);
 
-                       trace_smb3_add_credits(server->CurrentMid,
+                       trace_smb3_waitff_credits(server->CurrentMid,
                                        server->conn_id, server->hostname, scredits,
                                        -(num_credits), in_flight);
                        cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
index 4fcca79f39aebf6ca8076ab62526649eda1698e3..526a4c1bed9947b6931aae3a81de09c194229200 100644 (file)
@@ -248,7 +248,7 @@ EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
  * which must still be locked and not uptodate.  Normally, blocksize ==
  * PAGE_SIZE and the whole page is decrypted at once.
  *
- * This is for use by the filesystem's ->readpages() method.
+ * This is for use by the filesystem's ->readahead() method.
  *
  * Return: 0 on success; -errno on failure
  */
index 0ed880f42525b11b28ed93cd8dd38ceee464e373..e6dea6dfca161398c9e38324fc605e3a757ddd0f 100644 (file)
@@ -1066,12 +1066,9 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
 
        /* wake up the caller thread for sync decompression */
        if (sync) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&io->u.wait.lock, flags);
                if (!atomic_add_return(bios, &io->pending_bios))
-                       wake_up_locked(&io->u.wait);
-               spin_unlock_irqrestore(&io->u.wait.lock, flags);
+                       complete(&io->u.done);
+
                return;
        }
 
@@ -1217,7 +1214,7 @@ jobqueue_init(struct super_block *sb,
        } else {
 fg_out:
                q = fgq;
-               init_waitqueue_head(&fgq->u.wait);
+               init_completion(&fgq->u.done);
                atomic_set(&fgq->pending_bios, 0);
        }
        q->sb = sb;
@@ -1419,8 +1416,7 @@ static void z_erofs_runqueue(struct super_block *sb,
                return;
 
        /* wait until all bios are completed */
-       io_wait_event(io[JQ_SUBMIT].u.wait,
-                     !atomic_read(&io[JQ_SUBMIT].pending_bios));
+       wait_for_completion_io(&io[JQ_SUBMIT].u.done);
 
        /* handle synchronous decompress queue in the caller context */
        z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
index e043216b545f19ecb4b2d250b1e1efb2ca720823..800b11c53f5749ebcd3634a736b242b9fe887e9a 100644 (file)
@@ -97,7 +97,7 @@ struct z_erofs_decompressqueue {
        z_erofs_next_pcluster_t head;
 
        union {
-               wait_queue_head_t wait;
+               struct completion done;
                struct work_struct work;
        } u;
 };
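
The erofs hunks above replace an open-coded waitqueue handshake, where the submitter took the waitqueue lock around the atomic decrement to avoid a lost wakeup, with a struct completion: the last finishing bio calls complete(), and the submitter sleeps in wait_for_completion_io(). A userspace sketch of the same handshake built from a mutex, condvar, and flag, roughly what a kernel completion bundles:

#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;                    /* flag first, then wake: no lost wakeup */
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct completion io_done = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

static void *worker(void *arg)
{
	(void)arg;
	complete(&io_done);             /* like the last bio calling complete() */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	wait_for_completion(&io_done);  /* like wait_for_completion_io() */
	pthread_join(&t, NULL);
	puts("all bios completed");
	return 0;
}
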
index 619e5b4bed1009303f845435eda9e8ae446edee3..c6800b8809203b7d9a450ba13e839b70c1d65fbc 100644 (file)
@@ -203,7 +203,8 @@ struct exfat_mount_options {
        /* on error: continue, panic, remount-ro */
        enum exfat_error_mode errors;
        unsigned utf8:1, /* Use of UTF-8 character set */
-                discard:1; /* Issue discard requests on deletions */
+                discard:1, /* Issue discard requests on deletions */
+                keep_last_dots:1; /* Keep trailing periods in paths */
        int time_offset; /* Offset of timestamps from UTC (in minutes) */
 };
 
index d890fd34bb2d040a9c5c34939964a847f8a25c16..2f513005923661d4962817b7b6531f1b6548b7da 100644 (file)
@@ -218,8 +218,6 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
        if (exfat_free_cluster(inode, &clu))
                return -EIO;
 
-       exfat_clear_volume_dirty(sb);
-
        return 0;
 }
 
index af4eb39cc0c3ca415949d16bbc3ab44cc93f26e2..a02a04a993bfa9cccb3632538f8c97dc8ef84427 100644 (file)
@@ -65,11 +65,14 @@ static int exfat_d_revalidate(struct dentry *dentry, unsigned int flags)
        return ret;
 }
 
-/* returns the length of a struct qstr, ignoring trailing dots */
-static unsigned int exfat_striptail_len(unsigned int len, const char *name)
+/* returns the length of a struct qstr, ignoring trailing dots if necessary */
+static unsigned int exfat_striptail_len(unsigned int len, const char *name,
+                                       bool keep_last_dots)
 {
-       while (len && name[len - 1] == '.')
-               len--;
+       if (!keep_last_dots) {
+               while (len && name[len - 1] == '.')
+                       len--;
+       }
        return len;
 }
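
Worked example of the helper above: with keep_last_dots clear, a name like "archive.." hashes and compares as "archive" (length 9 becomes 7); with it set, the trailing dots count. A directly compilable userspace copy:

#include <stdbool.h>
#include <stdio.h>

static unsigned int striptail_len(unsigned int len, const char *name,
				  bool keep_last_dots)
{
	if (!keep_last_dots) {
		while (len && name[len - 1] == '.')
			len--;
	}
	return len;
}

int main(void)
{
	printf("%u\n", striptail_len(9, "archive..", false)); /* 7 */
	printf("%u\n", striptail_len(9, "archive..", true));  /* 9 */
	return 0;
}
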
 
@@ -83,7 +86,8 @@ static int exfat_d_hash(const struct dentry *dentry, struct qstr *qstr)
        struct super_block *sb = dentry->d_sb;
        struct nls_table *t = EXFAT_SB(sb)->nls_io;
        const unsigned char *name = qstr->name;
-       unsigned int len = exfat_striptail_len(qstr->len, qstr->name);
+       unsigned int len = exfat_striptail_len(qstr->len, qstr->name,
+                          EXFAT_SB(sb)->options.keep_last_dots);
        unsigned long hash = init_name_hash(dentry);
        int i, charlen;
        wchar_t c;
@@ -104,8 +108,10 @@ static int exfat_d_cmp(const struct dentry *dentry, unsigned int len,
 {
        struct super_block *sb = dentry->d_sb;
        struct nls_table *t = EXFAT_SB(sb)->nls_io;
-       unsigned int alen = exfat_striptail_len(name->len, name->name);
-       unsigned int blen = exfat_striptail_len(len, str);
+       unsigned int alen = exfat_striptail_len(name->len, name->name,
+                               EXFAT_SB(sb)->options.keep_last_dots);
+       unsigned int blen = exfat_striptail_len(len, str,
+                               EXFAT_SB(sb)->options.keep_last_dots);
        wchar_t c1, c2;
        int charlen, i;
 
@@ -136,7 +142,8 @@ static int exfat_utf8_d_hash(const struct dentry *dentry, struct qstr *qstr)
 {
        struct super_block *sb = dentry->d_sb;
        const unsigned char *name = qstr->name;
-       unsigned int len = exfat_striptail_len(qstr->len, qstr->name);
+       unsigned int len = exfat_striptail_len(qstr->len, qstr->name,
+                              EXFAT_SB(sb)->options.keep_last_dots);
        unsigned long hash = init_name_hash(dentry);
        int i, charlen;
        unicode_t u;
@@ -161,8 +168,11 @@ static int exfat_utf8_d_cmp(const struct dentry *dentry, unsigned int len,
                const char *str, const struct qstr *name)
 {
        struct super_block *sb = dentry->d_sb;
-       unsigned int alen = exfat_striptail_len(name->len, name->name);
-       unsigned int blen = exfat_striptail_len(len, str);
+       unsigned int alen = exfat_striptail_len(name->len, name->name,
+                               EXFAT_SB(sb)->options.keep_last_dots);
+       unsigned int blen = exfat_striptail_len(len, str,
+                               EXFAT_SB(sb)->options.keep_last_dots);
+
        unicode_t u_a, u_b;
        int charlen, i;
 
@@ -416,13 +426,25 @@ static int __exfat_resolve_path(struct inode *inode, const unsigned char *path,
        struct super_block *sb = inode->i_sb;
        struct exfat_sb_info *sbi = EXFAT_SB(sb);
        struct exfat_inode_info *ei = EXFAT_I(inode);
+       int pathlen = strlen(path);
 
-       /* strip all trailing periods */
-       namelen = exfat_striptail_len(strlen(path), path);
+       /*
+        * get the length of the pathname excluding
+        * trailing periods, if any.
+        */
+       namelen = exfat_striptail_len(pathlen, path, false);
+       if (EXFAT_SB(sb)->options.keep_last_dots) {
+               /*
+                * Do not allow the creation of files with names
+                * ending with period(s).
+                */
+               if (!lookup && (namelen < pathlen))
+                       return -EINVAL;
+               namelen = pathlen;
+       }
        if (!namelen)
                return -ENOENT;
-
-       if (strlen(path) > (MAX_NAME_LENGTH * MAX_CHARSET_SIZE))
+       if (pathlen > (MAX_NAME_LENGTH * MAX_CHARSET_SIZE))
                return -ENAMETOOLONG;
 
        /*
@@ -554,7 +576,6 @@ static int exfat_create(struct user_namespace *mnt_userns, struct inode *dir,
        exfat_set_volume_dirty(sb);
        err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_FILE,
                &info);
-       exfat_clear_volume_dirty(sb);
        if (err)
                goto unlock;
 
@@ -812,7 +833,6 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
 
        /* This doesn't modify ei */
        ei->dir.dir = DIR_DELETED;
-       exfat_clear_volume_dirty(sb);
 
        inode_inc_iversion(dir);
        dir->i_mtime = dir->i_atime = current_time(dir);
@@ -846,7 +866,6 @@ static int exfat_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
        exfat_set_volume_dirty(sb);
        err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_DIR,
                &info);
-       exfat_clear_volume_dirty(sb);
        if (err)
                goto unlock;
 
@@ -976,7 +995,6 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
                goto unlock;
        }
        ei->dir.dir = DIR_DELETED;
-       exfat_clear_volume_dirty(sb);
 
        inode_inc_iversion(dir);
        dir->i_mtime = dir->i_atime = current_time(dir);
@@ -1311,7 +1329,6 @@ del_out:
                 */
                new_ei->dir.dir = DIR_DELETED;
        }
-       exfat_clear_volume_dirty(sb);
 out:
        return ret;
 }
index 9f892903419aeaa7521f75ecc91999bc3f52912b..8ca21e7917d16a8806c4debb09b1a4e2197a4ac8 100644 (file)
@@ -100,7 +100,6 @@ static int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flags)
 {
        struct exfat_sb_info *sbi = EXFAT_SB(sb);
        struct boot_sector *p_boot = (struct boot_sector *)sbi->boot_bh->b_data;
-       bool sync;
 
        /* retain persistent-flags */
        new_flags |= sbi->vol_flags_persistent;
@@ -119,16 +118,11 @@ static int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flags)
 
        p_boot->vol_flags = cpu_to_le16(new_flags);
 
-       if ((new_flags & VOLUME_DIRTY) && !buffer_dirty(sbi->boot_bh))
-               sync = true;
-       else
-               sync = false;
-
        set_buffer_uptodate(sbi->boot_bh);
        mark_buffer_dirty(sbi->boot_bh);
 
-       if (sync)
-               sync_dirty_buffer(sbi->boot_bh);
+       __sync_dirty_buffer(sbi->boot_bh, REQ_SYNC | REQ_FUA | REQ_PREFLUSH);
+
        return 0;
 }
 
@@ -174,6 +168,8 @@ static int exfat_show_options(struct seq_file *m, struct dentry *root)
                seq_puts(m, ",errors=remount-ro");
        if (opts->discard)
                seq_puts(m, ",discard");
+       if (opts->keep_last_dots)
+               seq_puts(m, ",keep_last_dots");
        if (opts->time_offset)
                seq_printf(m, ",time_offset=%d", opts->time_offset);
        return 0;
@@ -217,6 +213,7 @@ enum {
        Opt_charset,
        Opt_errors,
        Opt_discard,
+       Opt_keep_last_dots,
        Opt_time_offset,
 
        /* Deprecated options */
@@ -243,6 +240,7 @@ static const struct fs_parameter_spec exfat_parameters[] = {
        fsparam_string("iocharset",             Opt_charset),
        fsparam_enum("errors",                  Opt_errors, exfat_param_enums),
        fsparam_flag("discard",                 Opt_discard),
+       fsparam_flag("keep_last_dots",          Opt_keep_last_dots),
        fsparam_s32("time_offset",              Opt_time_offset),
        __fsparam(NULL, "utf8",                 Opt_utf8, fs_param_deprecated,
                  NULL),
@@ -297,6 +295,9 @@ static int exfat_parse_param(struct fs_context *fc, struct fs_parameter *param)
        case Opt_discard:
                opts->discard = 1;
                break;
+       case Opt_keep_last_dots:
+               opts->keep_last_dots = 1;
+               break;
        case Opt_time_offset:
                /*
                 * Make the limit 24 just in case someone invents something
index 8bd66cdc41be2e464d6062a4349b6f0faf1c9e4d..6feb07e3e1eb595b9cc3feb14a182994c82a4cd1 100644 (file)
@@ -267,7 +267,7 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
                goto out;
 
        current->backing_dev_info = inode_to_bdi(inode);
-       ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
+       ret = generic_perform_write(iocb, from);
        current->backing_dev_info = NULL;
 
 out:
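
generic_perform_write() now takes only the kiocb and the iterator; the file and position it previously received as separate arguments already live in the kiocb, so the old signature carried redundant state that could drift out of sync. A sketch of the pattern with illustrative types (not the VFS structures):

#include <stddef.h>
#include <stdio.h>

struct kiocb_sketch {
	const char *filp_name; /* stands in for iocb->ki_filp */
	long long ki_pos;
};

/* Old shape: file and position passed alongside a control block that
 * already carries them, leaving room for the two copies to disagree. */
static size_t perform_write_old(const char *name, const char *buf,
				size_t len, long long pos)
{
	(void)buf;
	printf("old: %s @%lld (%zu bytes)\n", name, pos, len);
	return len;
}

/* New shape: everything needed travels inside the control block. */
static size_t perform_write_new(struct kiocb_sketch *iocb,
				const char *buf, size_t len)
{
	(void)buf;
	printf("new: %s @%lld (%zu bytes)\n", iocb->filp_name, iocb->ki_pos, len);
	return len;
}

int main(void)
{
	struct kiocb_sketch iocb = { "f.txt", 4096 };

	perform_write_old(iocb.filp_name, "x", 1, iocb.ki_pos);
	perform_write_new(&iocb, "x", 1);
	return 0;
}
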
index 1ce13f69fbecb2d65c21ae27f905cdd5fda0bab6..13740f2d0e6109a88bae9f83c0f1ff08bad99bce 100644 (file)
@@ -3589,7 +3589,7 @@ const struct iomap_ops ext4_iomap_report_ops = {
 static bool ext4_journalled_dirty_folio(struct address_space *mapping,
                struct folio *folio)
 {
-       WARN_ON_ONCE(!page_has_buffers(&folio->page));
+       WARN_ON_ONCE(!folio_buffers(folio));
        folio_set_checked(folio);
        return filemap_dirty_folio(mapping, folio);
 }
index 1aa26d6634fc9d7248dab6b96efff2c6b4e0f1d3..af491e170c4a9010819ef847c6ba0a1481749de9 100644 (file)
@@ -109,7 +109,7 @@ static void verity_work(struct work_struct *work)
        struct bio *bio = ctx->bio;
 
        /*
-        * fsverity_verify_bio() may call readpages() again, and although verity
+        * fsverity_verify_bio() may call readahead() again, and although verity
         * will be disabled for that, decryption may still be needed, causing
         * another bio_post_read_ctx to be allocated.  So to guarantee that
         * mempool_alloc() never deadlocks we must free the current ctx first.
index a8fc4fa511a80ebb6021c723b189b70c354077d6..f5366feea82dcfce0e33653e4001109b78755b7e 100644 (file)
@@ -456,7 +456,7 @@ static bool f2fs_dirty_meta_folio(struct address_space *mapping,
                folio_mark_uptodate(folio);
        if (!folio_test_dirty(folio)) {
                filemap_dirty_folio(mapping, folio);
-               inc_page_count(F2FS_P_SB(&folio->page), F2FS_DIRTY_META);
+               inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_META);
                set_page_private_reference(&folio->page);
                return true;
        }
index f8fcbe91059b274e7572f8e43468686773aaf854..8e0c2e773c8d92f460622673a5731e66554f19dc 100644 (file)
@@ -164,7 +164,7 @@ static void f2fs_verify_bio(struct work_struct *work)
        bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);
 
        /*
-        * fsverity_verify_bio() may call readpages() again, and while verity
+        * fsverity_verify_bio() may call readahead() again, and while verity
         * will be disabled for this, decryption and/or decompression may still
         * be needed, resulting in another bio_post_read_ctx being allocated.
         * So to prevent deadlocks we need to release the current ctx to the
@@ -2392,7 +2392,7 @@ static void f2fs_readahead(struct readahead_control *rac)
        if (!f2fs_is_compress_backend_ready(inode))
                return;
 
-       /* If the file has inline data, skip readpages */
+       /* If the file has inline data, skip readahead */
        if (f2fs_has_inline_data(inode))
                return;
 
@@ -3571,7 +3571,7 @@ static bool f2fs_dirty_data_folio(struct address_space *mapping,
                f2fs_update_dirty_folio(inode, folio);
                return true;
        }
-       return true;
+       return false;
 }
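
The one-line fix above restores the ->dirty_folio() contract: return true only when this call actually transitioned the folio to dirty, and false when it was dirty already, otherwise the accounting a caller performs on a true return would run twice. A userspace sketch of the contract:

#include <stdbool.h>
#include <stdio.h>

struct folio_sketch {
	bool dirty;
};

static bool dirty_folio(struct folio_sketch *f)
{
	if (!f->dirty) {
		f->dirty = true; /* first dirtier does the accounting */
		return true;
	}
	return false;            /* already dirty: nothing changed */
}

int main(void)
{
	struct folio_sketch f = { false };
	bool first = dirty_folio(&f);
	bool again = dirty_folio(&f);

	printf("first=%d again=%d\n", first, again); /* first=1 again=0 */
	return 0;
}
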
 
 
index d3f39a704b8b1d6a4fec20ee0d78c4b0e404c45f..5b89af0f27f053265672a30e595ad4db26a2905c 100644 (file)
@@ -4448,7 +4448,7 @@ static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb,
                return -EOPNOTSUPP;
 
        current->backing_dev_info = inode_to_bdi(inode);
-       ret = generic_perform_write(file, from, iocb->ki_pos);
+       ret = generic_perform_write(iocb, from);
        current->backing_dev_info = NULL;
 
        if (ret > 0) {
index 0b6e741e94a0f686fd269dbd99892f2f6c3f378b..c45d341dcf6e536fd1f5458621dcdb61cfa9f592 100644 (file)
@@ -2146,11 +2146,11 @@ static bool f2fs_dirty_node_folio(struct address_space *mapping,
                folio_mark_uptodate(folio);
 #ifdef CONFIG_F2FS_CHECK_FS
        if (IS_INODE(&folio->page))
-               f2fs_inode_chksum_set(F2FS_P_SB(&folio->page), &folio->page);
+               f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page);
 #endif
        if (!folio_test_dirty(folio)) {
                filemap_dirty_folio(mapping, folio);
-               inc_page_count(F2FS_P_SB(&folio->page), F2FS_DIRTY_NODES);
+               inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
                set_page_private_reference(&folio->page);
                return true;
        }
index 7d2e692b66a94fcfc609747802517e27489b8a0c..ada8fe814db97d118e10506382e38a02a6c9bc2b 100644 (file)
@@ -412,6 +412,7 @@ void __fput_sync(struct file *file)
 }
 
 EXPORT_SYMBOL(fput);
+EXPORT_SYMBOL(__fput_sync);
 
 void __init files_init(void)
 {
index 76316c4a3fb7f08bbbbe4dba8175e6aad6b1b100..b313a978ae0a25cfbf44a866f178b2ecacb96aaa 100644 (file)
@@ -38,6 +38,3 @@ config FSCACHE_DEBUG
          enabled by setting bits in /sys/module/fscache/parameters/debug.
 
          See Documentation/filesystems/caching/fscache.rst for more information.
-
-config FSCACHE_OLD_API
-       bool
index 2749933852a991c3f3ce58bc03bd7bcba2ac9d29..d645f8b302a27882c86c3c46e134dd5bcbc35cef 100644 (file)
@@ -214,7 +214,7 @@ void fscache_relinquish_cache(struct fscache_cache *cache)
 
        cache->ops = NULL;
        cache->cache_priv = NULL;
-       smp_store_release(&cache->state, FSCACHE_CACHE_IS_NOT_PRESENT);
+       fscache_set_cache_state(cache, FSCACHE_CACHE_IS_NOT_PRESENT);
        fscache_put_cache(cache, where);
 }
 EXPORT_SYMBOL(fscache_relinquish_cache);
index 9bb1ab5fe5ed15078a9d738edec04e852588235f..9d3cf01117093da9aa133bc83066b0338466e74a 100644 (file)
@@ -30,7 +30,7 @@ static DEFINE_SPINLOCK(fscache_cookie_lru_lock);
 DEFINE_TIMER(fscache_cookie_lru_timer, fscache_cookie_lru_timed_out);
 static DECLARE_WORK(fscache_cookie_lru_work, fscache_cookie_lru_worker);
 static const char fscache_cookie_states[FSCACHE_COOKIE_STATE__NR] = "-LCAIFUWRD";
-unsigned int fscache_lru_cookie_timeout = 10 * HZ;
+static unsigned int fscache_lru_cookie_timeout = 10 * HZ;
 
 void fscache_print_cookie(struct fscache_cookie *cookie, char prefix)
 {
@@ -1069,6 +1069,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
 }
 EXPORT_SYMBOL(__fscache_invalidate);
 
+#ifdef CONFIG_PROC_FS
 /*
  * Generate a list of extant cookies in /proc/fs/fscache/cookies
  */
@@ -1145,3 +1146,4 @@ const struct seq_operations fscache_cookies_seq_ops = {
        .stop   = fscache_cookies_seq_stop,
        .show   = fscache_cookies_seq_show,
 };
+#endif
index f121c21590dcf2950f404d2d76ef32f998fa0425..1336f517e9b1a60a41281f2f9e21d3e9dddcd025 100644 (file)
@@ -56,7 +56,9 @@ static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
  * cookie.c
  */
 extern struct kmem_cache *fscache_cookie_jar;
+#ifdef CONFIG_PROC_FS
 extern const struct seq_operations fscache_cookies_seq_ops;
+#endif
 extern struct timer_list fscache_cookie_lru_timer;
 
 extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
@@ -70,17 +72,6 @@ static inline void fscache_see_cookie(struct fscache_cookie *cookie,
                             where);
 }
 
-/*
- * io.c
- */
-static inline void fscache_end_operation(struct netfs_cache_resources *cres)
-{
-       const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
-
-       if (ops)
-               ops->end_operation(cres);
-}
-
 /*
  * main.c
  */
@@ -148,7 +139,9 @@ int fscache_stats_show(struct seq_file *m, void *v);
 /*
  * volume.c
  */
+#ifdef CONFIG_PROC_FS
 extern const struct seq_operations fscache_volumes_seq_ops;
+#endif
 
 struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
                                          enum fscache_volume_trace where);
index c8c7fe9e9a6ec0d9f0df27f4b8f3b84daf4425bf..3af3b08a9bb3f1ba21b249d48586145911bf7863 100644 (file)
@@ -235,8 +235,7 @@ static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
 {
        struct fscache_write_request *wreq = priv;
 
-       fscache_clear_page_bits(fscache_cres_cookie(&wreq->cache_resources),
-                               wreq->mapping, wreq->start, wreq->len,
+       fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
                                wreq->set_bits);
 
        if (wreq->term_func)
@@ -296,7 +295,7 @@ abandon_end:
 abandon_free:
        kfree(wreq);
 abandon:
-       fscache_clear_page_bits(cookie, mapping, start, len, cond);
+       fscache_clear_page_bits(mapping, start, len, cond);
        if (term_func)
                term_func(term_func_priv, ret, false);
 }
index eac4984cc753c2298e135a28f0f8f2e8ec0d9b17..488b460e046f4c679cf7b9c328bca6d9e9528774 100644 (file)
@@ -627,7 +627,7 @@ struct fuse_conn {
        /** Connection successful.  Only set in INIT */
        unsigned conn_init:1;
 
-       /** Do readpages asynchronously?  Only set in INIT */
+       /** Do readahead asynchronously?  Only set in INIT */
        unsigned async_read:1;
 
        /** Return a unique read error after abort.  Only set in INIT */
index d67108489148eef281128c6841c52135abf74050..39080b2d6cf86d26287b6e91b134997b5a1d5dff 100644 (file)
@@ -606,9 +606,9 @@ out:
        return ret;
 }
 
-static inline __be64 *gfs2_indirect_init(struct metapath *mp,
-                                        struct gfs2_glock *gl, unsigned int i,
-                                        unsigned offset, u64 bn)
+static inline void gfs2_indirect_init(struct metapath *mp,
+                                     struct gfs2_glock *gl, unsigned int i,
+                                     unsigned offset, u64 bn)
 {
        __be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
                       ((i > 1) ? sizeof(struct gfs2_meta_header) :
@@ -621,7 +621,6 @@ static inline __be64 *gfs2_indirect_init(struct metapath *mp,
        gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
        ptr += offset;
        *ptr = cpu_to_be64(bn);
-       return ptr;
 }
 
 enum alloc_state {
@@ -2146,7 +2145,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
 
        ret = do_shrink(inode, newsize);
 out:
-       gfs2_rs_delete(ip, NULL);
+       gfs2_rs_delete(ip);
        gfs2_qa_put(ip);
        return ret;
 }
index 8c39a8571b1fa9bf4db49405dfcbdd73184181bb..22b41acfbbc39a008dddb61d2b66fee883cb06c2 100644 (file)
@@ -706,7 +706,7 @@ static int gfs2_release(struct inode *inode, struct file *file)
 
        if (file->f_mode & FMODE_WRITE) {
                if (gfs2_rs_active(&ip->i_res))
-                       gfs2_rs_delete(ip, &inode->i_writecount);
+                       gfs2_rs_delete(ip);
                gfs2_qa_put(ip);
        }
        return 0;
@@ -775,8 +775,7 @@ static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
                                         size_t *window_size)
 {
        size_t count = iov_iter_count(i);
-       char __user *p;
-       int pages = 1;
+       size_t size, offs;
 
        if (likely(!count))
                return false;
@@ -785,18 +784,20 @@ static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
        if (!iter_is_iovec(i))
                return false;
 
+       size = PAGE_SIZE;
+       offs = offset_in_page(i->iov[0].iov_base + i->iov_offset);
        if (*prev_count != count || !*window_size) {
-               int pages, nr_dirtied;
+               size_t nr_dirtied;
 
-               pages = min_t(int, BIO_MAX_VECS, DIV_ROUND_UP(count, PAGE_SIZE));
+               size = ALIGN(offs + count, PAGE_SIZE);
+               size = min_t(size_t, size, SZ_1M);
                nr_dirtied = max(current->nr_dirtied_pause -
-                                current->nr_dirtied, 1);
-               pages = min(pages, nr_dirtied);
+                                current->nr_dirtied, 8);
+               size = min(size, nr_dirtied << PAGE_SHIFT);
        }
 
        *prev_count = count;
-       p = i->iov[0].iov_base + i->iov_offset;
-       *window_size = (size_t)PAGE_SIZE * pages - offset_in_page(p);
+       *window_size = size - offs;
        return true;
 }
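
The reworked sizing above bounds each fault-in window by the pages the request actually touches (ALIGN(offs + count, PAGE_SIZE)), caps it at 1 MiB, further limits it by the task's dirty-throttling headroom (at least 8 pages), then subtracts the in-page offset. A userspace rework with a worked example; this simplification recomputes on every call, unlike the prev_count/window_size caching in the original:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE  4096u
#define PAGE_SHIFT 12
#define SZ_1M      (1024u * 1024u)
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

static size_t window_size(size_t count, size_t offs, size_t dirty_headroom)
{
	size_t size = ALIGN_UP(offs + count, PAGE_SIZE); /* pages touched */
	size_t nr_dirtied;

	if (size > SZ_1M)                       /* cap one window at 1 MiB */
		size = SZ_1M;
	nr_dirtied = dirty_headroom > 8 ? dirty_headroom : 8;
	if (size > (nr_dirtied << PAGE_SHIFT))  /* respect dirty throttling */
		size = nr_dirtied << PAGE_SHIFT;
	return size - offs;                     /* usable bytes from offs on */
}

int main(void)
{
	/* 300000 bytes starting 100 bytes into a page, 8 pages of headroom:
	 * ALIGN(300100, 4096) = 303104, capped to 8 << 12 = 32768,
	 * so the window is 32768 - 100 = 32668 bytes. */
	printf("%zu\n", window_size(300000, 100, 8));
	return 0;
}
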
 
@@ -851,9 +852,9 @@ retry_under_glock:
                leftover = fault_in_iov_iter_writeable(to, window_size);
                gfs2_holder_disallow_demote(gh);
                if (leftover != window_size) {
-                       if (!gfs2_holder_queued(gh))
-                               goto retry;
-                       goto retry_under_glock;
+                       if (gfs2_holder_queued(gh))
+                               goto retry_under_glock;
+                       goto retry;
                }
        }
        if (gfs2_holder_queued(gh))
@@ -920,9 +921,9 @@ retry_under_glock:
                leftover = fault_in_iov_iter_readable(from, window_size);
                gfs2_holder_disallow_demote(gh);
                if (leftover != window_size) {
-                       if (!gfs2_holder_queued(gh))
-                               goto retry;
-                       goto retry_under_glock;
+                       if (gfs2_holder_queued(gh))
+                               goto retry_under_glock;
+                       goto retry;
                }
        }
 out:
@@ -950,20 +951,19 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
         * and retry.
         */
 
-       if (iocb->ki_flags & IOCB_DIRECT) {
-               ret = gfs2_file_direct_read(iocb, to, &gh);
-               if (likely(ret != -ENOTBLK))
-                       return ret;
-               iocb->ki_flags &= ~IOCB_DIRECT;
-       }
+       if (iocb->ki_flags & IOCB_DIRECT)
+               return gfs2_file_direct_read(iocb, to, &gh);
+
+       pagefault_disable();
        iocb->ki_flags |= IOCB_NOIO;
        ret = generic_file_read_iter(iocb, to);
        iocb->ki_flags &= ~IOCB_NOIO;
+       pagefault_enable();
        if (ret >= 0) {
                if (!iov_iter_count(to))
                        return ret;
                written = ret;
-       } else {
+       } else if (ret != -EFAULT) {
                if (ret != -EAGAIN)
                        return ret;
                if (iocb->ki_flags & IOCB_NOWAIT)
@@ -989,12 +989,11 @@ retry_under_glock:
                leftover = fault_in_iov_iter_writeable(to, window_size);
                gfs2_holder_disallow_demote(&gh);
                if (leftover != window_size) {
-                       if (!gfs2_holder_queued(&gh)) {
-                               if (written)
-                                       goto out_uninit;
-                               goto retry;
-                       }
-                       goto retry_under_glock;
+                       if (gfs2_holder_queued(&gh))
+                               goto retry_under_glock;
+                       if (written)
+                               goto out_uninit;
+                       goto retry;
                }
        }
        if (gfs2_holder_queued(&gh))
@@ -1068,12 +1067,11 @@ retry_under_glock:
                gfs2_holder_disallow_demote(gh);
                if (leftover != window_size) {
                        from->count = min(from->count, window_size - leftover);
-                       if (!gfs2_holder_queued(gh)) {
-                               if (read)
-                                       goto out_uninit;
-                               goto retry;
-                       }
-                       goto retry_under_glock;
+                       if (gfs2_holder_queued(gh))
+                               goto retry_under_glock;
+                       if (read && !(iocb->ki_flags & IOCB_DIRECT))
+                               goto out_uninit;
+                       goto retry;
                }
        }
 out_unlock:
@@ -1083,6 +1081,7 @@ out_uninit:
        gfs2_holder_uninit(gh);
        if (statfs_gh)
                kfree(statfs_gh);
+       from->count = orig_count - read;
        return read ? read : ret;
 }
 
@@ -1497,7 +1496,6 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
                if (error != GLR_TRYFAILED)
                        break;
                fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
-               fl_gh->gh_error = 0;
                msleep(sleeptime);
        }
        if (error) {
index 6b23399eaee0cbcb6ea340a6741faf86957e4580..630c6550eacfef96f273ead0e3d993eff63bc825 100644 (file)
@@ -542,7 +542,7 @@ restart:
                         * some reason. If this holder is the head of the list, it
                         * means we have a blocked holder at the head, so return 1.
                         */
-                       if (gh->gh_list.prev == &gl->gl_holders)
+                       if (list_is_first(&gh->gh_list, &gl->gl_holders))
                                return 1;
                        do_error(gl, 0);
                        break;
@@ -669,6 +669,8 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 
        /* Check for state != intended state */
        if (unlikely(state != gl->gl_target)) {
+               if (gh && (ret & LM_OUT_CANCELED))
+                       gfs2_holder_wake(gh);
                if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
                        /* move to back of queue and try next entry */
                        if (ret & LM_OUT_CANCELED) {
@@ -1259,7 +1261,6 @@ void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
        gh->gh_owner_pid = get_pid(task_pid(current));
        gh->gh_state = state;
        gh->gh_flags = flags;
-       gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
 }
@@ -1565,6 +1566,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
        if (test_bit(GLF_LRU, &gl->gl_flags))
                gfs2_glock_remove_from_lru(gl);
 
+       gh->gh_error = 0;
        spin_lock(&gl->gl_lockref.lock);
        add_to_queue(gh);
        if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
@@ -1691,6 +1693,14 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
        struct gfs2_glock *gl = gh->gh_gl;
 
        spin_lock(&gl->gl_lockref.lock);
+       if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
+           !test_bit(HIF_HOLDER, &gh->gh_iflags)) {
+               spin_unlock(&gl->gl_lockref.lock);
+               gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl);
+               wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
+               spin_lock(&gl->gl_lockref.lock);
+       }
+
        __gfs2_glock_dq(gh);
        spin_unlock(&gl->gl_lockref.lock);
 }
index 89905f4f29bb6de91e181373c4f168431f3e8e74..c8ec876f33ea35787d05d2d69c81ca91854ab685 100644 (file)
@@ -131,7 +131,21 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
                struct gfs2_sbd *sdp = GFS2_SB(inode);
                struct gfs2_glock *io_gl;
 
-               error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
+               error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE,
+                                      &ip->i_gl);
+               if (unlikely(error))
+                       goto fail;
+
+               error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE,
+                                      &io_gl);
+               if (unlikely(error))
+                       goto fail;
+
+               if (blktype != GFS2_BLKST_UNLINKED)
+                       gfs2_cancel_delete_work(io_gl);
+               error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT,
+                                          &ip->i_iopen_gh);
+               gfs2_glock_put(io_gl);
                if (unlikely(error))
                        goto fail;
 
@@ -161,16 +175,6 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
                set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags);
 
-               error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
-               if (unlikely(error))
-                       goto fail;
-               if (blktype != GFS2_BLKST_UNLINKED)
-                       gfs2_cancel_delete_work(io_gl);
-               error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
-               gfs2_glock_put(io_gl);
-               if (unlikely(error))
-                       goto fail;
-
                /* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
                inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
                inode->i_atime.tv_nsec = 0;
@@ -716,13 +720,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr);
        BUG_ON(error);
 
-       error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
+       error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
        if (error)
                goto fail_gunlock2;
 
+       error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
+       if (error)
+               goto fail_gunlock3;
+
        error = gfs2_trans_begin(sdp, blocks, 0);
        if (error)
-               goto fail_gunlock2;
+               goto fail_gunlock3;
 
        if (blocks > 1) {
                ip->i_eattr = ip->i_no_addr + 1;
@@ -731,10 +739,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        init_dinode(dip, ip, symname);
        gfs2_trans_end(sdp);
 
-       error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
-       if (error)
-               goto fail_gunlock2;
-
        glock_set_object(ip->i_gl, ip);
        glock_set_object(io_gl, ip);
        gfs2_set_iop(inode);
@@ -745,14 +749,14 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        if (default_acl) {
                error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
                if (error)
-                       goto fail_gunlock3;
+                       goto fail_gunlock4;
                posix_acl_release(default_acl);
                default_acl = NULL;
        }
        if (acl) {
                error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
                if (error)
-                       goto fail_gunlock3;
+                       goto fail_gunlock4;
                posix_acl_release(acl);
                acl = NULL;
        }
@@ -760,11 +764,11 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name,
                                             &gfs2_initxattrs, NULL);
        if (error)
-               goto fail_gunlock3;
+               goto fail_gunlock4;
 
        error = link_dinode(dip, name, ip, &da);
        if (error)
-               goto fail_gunlock3;
+               goto fail_gunlock4;
 
        mark_inode_dirty(inode);
        d_instantiate(dentry, inode);
@@ -782,9 +786,10 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        unlock_new_inode(inode);
        return error;
 
-fail_gunlock3:
+fail_gunlock4:
        glock_clear_object(ip->i_gl, ip);
        glock_clear_object(io_gl, ip);
+fail_gunlock3:
        gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 fail_gunlock2:
        gfs2_glock_put(io_gl);
@@ -793,7 +798,7 @@ fail_free_inode:
                if (free_vfs_inode) /* else evict will do the put for us */
                        gfs2_glock_put(ip->i_gl);
        }
-       gfs2_rs_delete(ip, NULL);
+       gfs2_rs_deltree(&ip->i_res);
        gfs2_qa_put(ip);
 fail_free_acls:
        posix_acl_release(default_acl);
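
The fail_gunlock3/fail_gunlock4 renumbering above is the usual cost of inserting a new acquisition into a goto-unwind ladder: each label releases exactly what was acquired before the jump, in reverse order, so taking the iopen glock earlier shifts every later label down the ladder. A generic sketch of the idiom with hypothetical resources (malloc standing in for lock/glock acquisition):

    #include <stdlib.h>

    static int create_thing(char **out)
    {
            char *a, *b, *c;

            a = malloc(16);
            if (!a)
                    return -1;
            b = malloc(16);         /* the newly inserted middle step */
            if (!b)
                    goto fail_a;
            c = malloc(16);
            if (!c)
                    goto fail_b;    /* later failures unwind b, then a */

            *out = c;               /* success: caller owns c */
            free(b);
            free(a);
            return 0;

    fail_b:
            free(b);
    fail_a:
            free(a);
            return -1;
    }
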
index 50578f881e6de18bacb6f9af00d4bd72eade4a47..2559a79cf14beaab1960fd3aeb08abcb67b9a61a 100644 (file)
@@ -261,6 +261,7 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
        int req;
        u32 lkf;
        char strname[GDLM_STRNAME_BYTES] = "";
+       int error;
 
        req = make_mode(gl->gl_name.ln_sbd, req_state);
        lkf = make_flags(gl, flags, req);
@@ -279,8 +280,14 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
         * Submit the actual lock request.
         */
 
-       return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
+again:
+       error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
                        GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
+       if (error == -EBUSY) {
+               msleep(20);
+               goto again;
+       }
+       return error;
 }
 
 static void gdlm_put_lock(struct gfs2_glock *gl)
@@ -312,8 +319,14 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
                return;
        }
 
+again:
        error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
                           NULL, gl);
+       if (error == -EBUSY) {
+               msleep(20);
+               goto again;
+       }
+
        if (error) {
                fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
                       gl->gl_name.ln_type,
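
Both DLM call sites above now absorb a transient -EBUSY by sleeping briefly and retrying instead of propagating the error. The same shape in a userspace sketch, with nanosleep standing in for msleep and do_op() as a placeholder for the busy-returning call; a production version might cap the retries:

    #include <errno.h>
    #include <time.h>

    static int do_op(void);  /* placeholder: may transiently return -EBUSY */

    static int do_op_retry(void)
    {
            struct timespec ts = { .tv_sec = 0, .tv_nsec = 20 * 1000 * 1000 };
            int error;

            for (;;) {
                    error = do_op();
                    if (error != -EBUSY)
                            return error;
                    /* back off ~20ms, as the patch does, then retry */
                    nanosleep(&ts, NULL);
            }
    }
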
index 0fb3c01bc55773b73ab1da616f8f6544c1b06366..801ad9f4f2bef9cc4b0ec335cc794a6c572d733f 100644 (file)
@@ -680,13 +680,14 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
 /**
  * gfs2_rs_delete - delete a multi-block reservation
  * @ip: The inode for this reservation
- * @wcount: The inode's write count, or NULL
  *
  */
-void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount)
+void gfs2_rs_delete(struct gfs2_inode *ip)
 {
+       struct inode *inode = &ip->i_inode;
+
        down_write(&ip->i_rw_mutex);
-       if ((wcount == NULL) || (atomic_read(wcount) <= 1))
+       if (atomic_read(&inode->i_writecount) <= 1)
                gfs2_rs_deltree(&ip->i_res);
        up_write(&ip->i_rw_mutex);
 }
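
With the wcount parameter gone, gfs2_rs_delete() derives its "last writer" test directly from the inode. A tiny sketch of the simplified rule with illustrative types:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct inode_like {
            atomic_int i_writecount;  /* writers still holding the file open */
    };

    /* Drop the reservation only when at most one writer remains,
     * reading the count from the object instead of a passed-in pointer. */
    static bool should_drop_reservation(struct inode_like *inode)
    {
            return atomic_load(&inode->i_writecount) <= 1;
    }
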
@@ -922,15 +923,15 @@ static int read_rindex_entry(struct gfs2_inode *ip)
        spin_lock_init(&rgd->rd_rsspin);
        mutex_init(&rgd->rd_mutex);
 
-       error = compute_bitstructs(rgd);
-       if (error)
-               goto fail;
-
        error = gfs2_glock_get(sdp, rgd->rd_addr,
                               &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
        if (error)
                goto fail;
 
+       error = compute_bitstructs(rgd);
+       if (error)
+               goto fail_glock;
+
        rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
        rgd->rd_flags &= ~GFS2_RDF_PREFERRED;
        if (rgd->rd_data > sdp->sd_max_rg_data)
@@ -944,6 +945,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
        }
 
        error = 0; /* someone else read in the rgrp; free it and ignore it */
+fail_glock:
        gfs2_glock_put(rgd->rd_gl);
 
 fail:
@@ -1415,7 +1417,8 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
 
        start = r.start >> bs_shift;
        end = start + (r.len >> bs_shift);
-       minlen = max_t(u64, r.minlen,
+       minlen = max_t(u64, r.minlen, sdp->sd_sb.sb_bsize);
+       minlen = max_t(u64, minlen,
                       q->limits.discard_granularity) >> bs_shift;
 
        if (end <= start || minlen > sdp->sd_max_rg_data)
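
The FITRIM hunk above clamps the user-supplied minimum extent to the filesystem block size before also clamping it to the device's discard granularity, so a zero or tiny r.minlen can no longer round down to zero blocks. A sketch of the clamping chain (max_u64 is a local helper, not kernel API):

    #include <stdint.h>

    static inline uint64_t max_u64(uint64_t a, uint64_t b)
    {
            return a > b ? a : b;
    }

    static uint64_t trim_minlen(uint64_t user_minlen, uint64_t fs_bsize,
                                uint64_t discard_granularity,
                                unsigned int bs_shift)
    {
            uint64_t minlen = max_u64(user_minlen, fs_bsize);

            /* clamp to hardware granularity, then convert bytes -> blocks */
            return max_u64(minlen, discard_granularity) >> bs_shift;
    }
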
index 3e2ca1fb43056c6520913357ec5ba10d9fdf7566..46dd94e9e085c20748e744c1eaa13340d959060a 100644 (file)
@@ -45,7 +45,7 @@ extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
                             bool dinode, u64 *generation);
 
 extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
-extern void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount);
+extern void gfs2_rs_delete(struct gfs2_inode *ip);
 extern void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
                               u64 bstart, u32 blen, int meta);
 extern void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
index cf9cf66522b394300046d724f821162978ab3fd5..bdb773e5c88f05596f11f4c5ac3c96e868bd6dd5 100644 (file)
@@ -1396,7 +1396,7 @@ out:
        truncate_inode_pages_final(&inode->i_data);
        if (ip->i_qadata)
                gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
-       gfs2_rs_delete(ip, NULL);
+       gfs2_rs_deltree(&ip->i_res);
        gfs2_ordered_del_inode(ip);
        clear_inode(inode);
        gfs2_dir_hash_inval(ip);
index fb2c2ea807d7466f908a0ae113679fbf55d1c5f6..08503dc68d2b1ef7f2cc0f6b0986654036dcdc1a 100644 (file)
@@ -74,7 +74,7 @@ int do_linkat(int olddfd, struct filename *old, int newdfd,
  * namespace.c
  */
 extern struct vfsmount *lookup_mnt(const struct path *);
-extern int finish_automount(struct vfsmount *, struct path *);
+extern int finish_automount(struct vfsmount *, const struct path *);
 
 extern int sb_prepare_remount_readonly(struct super_block *);
 
index b94d57c1b0e5fc68bb7ece5419b33d8ae5fa6d92..4479013854d200c689b5864ce3b46d9170a6042a 100644 (file)
@@ -63,7 +63,6 @@
 #include <net/sock.h>
 #include <net/af_unix.h>
 #include <net/scm.h>
-#include <net/busy_poll.h>
 #include <linux/anon_inodes.h>
 #include <linux/sched/mm.h>
 #include <linux/uaccess.h>
                        IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
 
 #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
-                               REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
-                               REQ_F_ASYNC_DATA)
+                               REQ_F_POLLED | REQ_F_CREDS | REQ_F_ASYNC_DATA)
 
 #define IO_TCTX_REFS_CACHE_NR  (1U << 10)
 
@@ -412,11 +410,6 @@ struct io_ring_ctx {
        struct list_head        sqd_list;
 
        unsigned long           check_cq_overflow;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       /* used to track busy poll napi_id */
-       struct list_head        napi_list;
-       spinlock_t              napi_lock;      /* napi_list lock */
-#endif
 
        struct {
                unsigned                cached_cq_tail;
@@ -500,7 +493,6 @@ struct io_uring_task {
        const struct io_ring_ctx *last;
        struct io_wq            *io_wq;
        struct percpu_counter   inflight;
-       atomic_t                inflight_tracked;
        atomic_t                in_idle;
 
        spinlock_t              task_lock;
@@ -592,7 +584,8 @@ struct io_rw {
        /* NOTE: kiocb has the file as the first member, so don't do it here */
        struct kiocb                    kiocb;
        u64                             addr;
-       u64                             len;
+       u32                             len;
+       u32                             flags;
 };
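
Shrinking io_rw's len from u64 to u32 frees exactly the space needed to stash sqe->rw_flags at prep time (see the io_prep_rw hunk further down), so the deferred file setup can consume them at issue time without re-reading the SQE. A compile-time sketch of the layout claim, with an illustrative struct:

    #include <assert.h>
    #include <stdint.h>

    struct io_rw_like {
            uint64_t addr;
            uint32_t len;    /* was u64; 32 bits is plenty for one I/O */
            uint32_t flags;  /* lives in the space the shrink freed up */
    };

    /* two u32s pack into the old u64 slot: total size is unchanged */
    static_assert(sizeof(struct io_rw_like) == 2 * sizeof(uint64_t),
                  "len+flags must not grow the struct");
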
 
 struct io_connect {
@@ -611,6 +604,7 @@ struct io_sr_msg {
        int                             msg_flags;
        int                             bgid;
        size_t                          len;
+       size_t                          done_io;
 };
 
 struct io_open {
@@ -653,10 +647,10 @@ struct io_epoll {
 
 struct io_splice {
        struct file                     *file_out;
-       struct file                     *file_in;
        loff_t                          off_out;
        loff_t                          off_in;
        u64                             len;
+       int                             splice_fd_in;
        unsigned int                    flags;
 };
 
@@ -781,6 +775,7 @@ enum {
        REQ_F_SKIP_LINK_CQES_BIT,
        REQ_F_SINGLE_POLL_BIT,
        REQ_F_DOUBLE_POLL_BIT,
+       REQ_F_PARTIAL_IO_BIT,
        /* keep async read/write and isreg together and in order */
        REQ_F_SUPPORT_NOWAIT_BIT,
        REQ_F_ISREG_BIT,
@@ -843,6 +838,8 @@ enum {
        REQ_F_SINGLE_POLL       = BIT(REQ_F_SINGLE_POLL_BIT),
        /* double poll may be active */
        REQ_F_DOUBLE_POLL       = BIT(REQ_F_DOUBLE_POLL_BIT),
+       /* request has already done partial IO */
+       REQ_F_PARTIAL_IO        = BIT(REQ_F_PARTIAL_IO_BIT),
 };
 
 struct async_poll {
@@ -910,7 +907,11 @@ struct io_kiocb {
 
        u64                             user_data;
        u32                             result;
-       u32                             cflags;
+       /* fd initially, then cflags for completion */
+       union {
+               u32                     cflags;
+               int                     fd;
+       };
 
        struct io_ring_ctx              *ctx;
        struct task_struct              *task;
@@ -919,11 +920,14 @@ struct io_kiocb {
        /* store used ubuf, so we can prevent reloading */
        struct io_mapped_ubuf           *imu;
 
-       /* used by request caches, completion batching and iopoll */
-       struct io_wq_work_node          comp_list;
+       union {
+               /* used by request caches, completion batching and iopoll */
+               struct io_wq_work_node  comp_list;
+               /* cache ->apoll->events */
+               int apoll_events;
+       };
        atomic_t                        refs;
        atomic_t                        poll_refs;
-       struct io_kiocb                 *link;
        struct io_task_work             io_task_work;
        /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
        struct hlist_node               hash_node;
@@ -931,9 +935,11 @@ struct io_kiocb {
        struct async_poll               *apoll;
        /* opcode allocated if it needs to store data for async defer */
        void                            *async_data;
-       /* custom credentials, valid IFF REQ_F_CREDS is set */
        /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
        struct io_buffer                *kbuf;
+       /* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
+       struct io_kiocb                 *link;
+       /* custom credentials, valid IFF REQ_F_CREDS is set */
        const struct cred               *creds;
        struct io_wq_work               work;
 };
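
Several io_kiocb fields are live only during disjoint phases of a request, so the hunks above overlay them in unions rather than growing the struct: fd matters only until the file is resolved, cflags only at completion; comp_list is used once the request is done, while apoll_events is needed only while poll is armed. A minimal sketch of the idea with an illustrative struct:

    #include <stdint.h>

    struct req_like {
            /* the phase of the request determines which member is valid */
            union {
                    uint32_t cflags;  /* completion side only */
                    int      fd;      /* submission side, until file lookup */
            };
    };
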
@@ -962,6 +968,7 @@ struct io_op_def {
        /* set if opcode supports polled "wait" */
        unsigned                pollin : 1;
        unsigned                pollout : 1;
+       unsigned                poll_exclusive : 1;
        /* op supports buffer selection */
        unsigned                buffer_select : 1;
        /* do prep async if is going to be punted */
@@ -1056,6 +1063,7 @@ static const struct io_op_def io_op_defs[] = {
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollin                 = 1,
+               .poll_exclusive         = 1,
        },
        [IORING_OP_ASYNC_CANCEL] = {
                .audit_skip             = 1,
@@ -1175,8 +1183,11 @@ static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
                                     struct io_uring_rsrc_update2 *up,
                                     unsigned nr_args);
 static void io_clean_op(struct io_kiocb *req);
-static struct file *io_file_get(struct io_ring_ctx *ctx,
-                               struct io_kiocb *req, int fd, bool fixed);
+static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
+                                            unsigned issue_flags);
+static inline struct file *io_file_get_normal(struct io_kiocb *req, int fd);
+static void io_drop_inflight_file(struct io_kiocb *req);
+static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags);
 static void __io_queue_sqe(struct io_kiocb *req);
 static void io_rsrc_put_work(struct work_struct *work);
 
@@ -1306,13 +1317,20 @@ static void io_rsrc_refs_refill(struct io_ring_ctx *ctx)
 }
 
 static inline void io_req_set_rsrc_node(struct io_kiocb *req,
-                                       struct io_ring_ctx *ctx)
+                                       struct io_ring_ctx *ctx,
+                                       unsigned int issue_flags)
 {
        if (!req->fixed_rsrc_refs) {
                req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
-               ctx->rsrc_cached_refs--;
-               if (unlikely(ctx->rsrc_cached_refs < 0))
-                       io_rsrc_refs_refill(ctx);
+
+               if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+                       lockdep_assert_held(&ctx->uring_lock);
+                       ctx->rsrc_cached_refs--;
+                       if (unlikely(ctx->rsrc_cached_refs < 0))
+                               io_rsrc_refs_refill(ctx);
+               } else {
+                       percpu_ref_get(req->fixed_rsrc_refs);
+               }
        }
 }
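
io_req_set_rsrc_node() now splits two reference-taking strategies: callers holding uring_lock consume a prepaid reference from a cache refilled in batches, while unlocked callers pay for a real percpu reference. A sketch of the batching idea with C11 atomics; the batch size and all names are made up:

    #include <stdatomic.h>

    struct node_like {
            atomic_int refs;          /* authoritative refcount */
    };

    struct ctx_like {
            struct node_like *node;
            int cached_refs;          /* prepaid refs, touched only locked */
    };

    /* Locked path: consume a prepaid reference; refill in bulk when the
     * cache runs dry, hitting the atomic once per batch, not per request. */
    static void get_ref_locked(struct ctx_like *ctx)
    {
            if (--ctx->cached_refs < 0) {
                    atomic_fetch_add(&ctx->node->refs, 64);
                    ctx->cached_refs += 64;
            }
    }

    /* Unlocked path: cannot touch the cache, pays the atomic directly. */
    static void get_ref_unlocked(struct ctx_like *ctx)
    {
            atomic_fetch_add(&ctx->node->refs, 1);
    }
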
 
@@ -1330,6 +1348,8 @@ static unsigned int __io_put_kbuf(struct io_kiocb *req, struct list_head *list)
 
 static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
 {
+       lockdep_assert_held(&req->ctx->completion_lock);
+
        if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
                return 0;
        return __io_put_kbuf(req, &req->ctx->io_buffers_comp);
@@ -1362,6 +1382,8 @@ static inline unsigned int io_put_kbuf(struct io_kiocb *req,
                cflags = __io_put_kbuf(req, &ctx->io_buffers_comp);
                spin_unlock(&ctx->completion_lock);
        } else {
+               lockdep_assert_held(&req->ctx->uring_lock);
+
                cflags = __io_put_kbuf(req, &req->ctx->io_buffers_cache);
        }
 
@@ -1382,7 +1404,7 @@ static struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
        return NULL;
 }
 
-static void io_kbuf_recycle(struct io_kiocb *req)
+static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 {
        struct io_ring_ctx *ctx = req->ctx;
        struct io_buffer_list *bl;
@@ -1390,6 +1412,12 @@ static void io_kbuf_recycle(struct io_kiocb *req)
 
        if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
                return;
+       /* don't recycle if we already did IO to this buffer */
+       if (req->flags & REQ_F_PARTIAL_IO)
+               return;
+
+       if (issue_flags & IO_URING_F_UNLOCKED)
+               mutex_lock(&ctx->uring_lock);
 
        lockdep_assert_held(&ctx->uring_lock);
 
@@ -1398,35 +1426,18 @@ static void io_kbuf_recycle(struct io_kiocb *req)
        list_add(&buf->list, &bl->buf_list);
        req->flags &= ~REQ_F_BUFFER_SELECTED;
        req->kbuf = NULL;
+
+       if (issue_flags & IO_URING_F_UNLOCKED)
+               mutex_unlock(&ctx->uring_lock);
 }
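
io_kbuf_recycle() can now be reached from contexts that do not hold uring_lock, so it takes the mutex itself exactly when IO_URING_F_UNLOCKED says the caller did not; note the cheap early returns (no selected buffer, REQ_F_PARTIAL_IO) happen before any locking. A sketch of the conditional-locking shape, with a pthread mutex standing in for the ring mutex:

    #include <pthread.h>

    #define F_UNLOCKED 0x1  /* caller does NOT already hold the lock */

    static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

    static void recycle(unsigned int issue_flags)
    {
            if (issue_flags & F_UNLOCKED)
                    pthread_mutex_lock(&ring_lock);

            /* ...work that must run under ring_lock either way... */

            if (issue_flags & F_UNLOCKED)
                    pthread_mutex_unlock(&ring_lock);
    }
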
 
 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
                          bool cancel_all)
        __must_hold(&req->ctx->timeout_lock)
 {
-       struct io_kiocb *req;
-
        if (task && head->task != task)
                return false;
-       if (cancel_all)
-               return true;
-
-       io_for_each_link(req, head) {
-               if (req->flags & REQ_F_INFLIGHT)
-                       return true;
-       }
-       return false;
-}
-
-static bool io_match_linked(struct io_kiocb *head)
-{
-       struct io_kiocb *req;
-
-       io_for_each_link(req, head) {
-               if (req->flags & REQ_F_INFLIGHT)
-                       return true;
-       }
-       return false;
+       return cancel_all;
 }
 
 /*
@@ -1436,24 +1447,9 @@ static bool io_match_linked(struct io_kiocb *head)
 static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
                               bool cancel_all)
 {
-       bool matched;
-
        if (task && head->task != task)
                return false;
-       if (cancel_all)
-               return true;
-
-       if (head->flags & REQ_F_LINK_TIMEOUT) {
-               struct io_ring_ctx *ctx = head->ctx;
-
-               /* protect against races with linked timeouts */
-               spin_lock_irq(&ctx->timeout_lock);
-               matched = io_match_linked(head);
-               spin_unlock_irq(&ctx->timeout_lock);
-       } else {
-               matched = io_match_linked(head);
-       }
-       return matched;
+       return cancel_all;
 }
 
 static inline bool req_has_async_data(struct io_kiocb *req)
@@ -1575,10 +1571,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
        INIT_WQ_LIST(&ctx->locked_free_list);
        INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
        INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       INIT_LIST_HEAD(&ctx->napi_list);
-       spin_lock_init(&ctx->napi_lock);
-#endif
        return ctx;
 err:
        kfree(ctx->dummy_ubuf);
@@ -1616,14 +1608,6 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
        return req->flags & REQ_F_FIXED_FILE;
 }
 
-static inline void io_req_track_inflight(struct io_kiocb *req)
-{
-       if (!(req->flags & REQ_F_INFLIGHT)) {
-               req->flags |= REQ_F_INFLIGHT;
-               atomic_inc(&current->io_uring->inflight_tracked);
-       }
-}
-
 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 {
        if (WARN_ON_ONCE(!req->link))
@@ -1667,14 +1651,6 @@ static void io_prep_async_work(struct io_kiocb *req)
                if (def->unbound_nonreg_file)
                        req->work.flags |= IO_WQ_WORK_UNBOUND;
        }
-
-       switch (req->opcode) {
-       case IORING_OP_SPLICE:
-       case IORING_OP_TEE:
-               if (!S_ISREG(file_inode(req->splice.file_in)->i_mode))
-                       req->work.flags |= IO_WQ_WORK_UNBOUND;
-               break;
-       }
 }
 
 static void io_prep_async_link(struct io_kiocb *req)
@@ -1768,12 +1744,11 @@ static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
        __must_hold(&ctx->completion_lock)
 {
        u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+       struct io_kiocb *req, *tmp;
 
        spin_lock_irq(&ctx->timeout_lock);
-       while (!list_empty(&ctx->timeout_list)) {
+       list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
                u32 events_needed, events_got;
-               struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
-                                               struct io_kiocb, timeout.list);
 
                if (io_is_timeout_noseq(req))
                        break;
@@ -1790,7 +1765,6 @@ static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
                if (events_got < events_needed)
                        break;
 
-               list_del_init(&req->timeout.list);
                io_kill_timeout(req, 0);
        }
        ctx->cq_last_tm_flush = seq;
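
Switching io_flush_timeouts() to list_for_each_entry_safe() lets entries be unlinked mid-walk (the unlink now apparently lives inside io_kill_timeout(), hence the dropped list_del_init()) because the iterator caches the next node before the body runs. A freestanding sketch of the same safe-iteration contract on a singly linked list:

    #include <stdlib.h>

    struct node {
            struct node *next;
            int expired;
    };

    /* 'n' is saved before 'cur' may be freed — the guarantee that
     * list_for_each_entry_safe() provides on kernel lists. */
    static void reap(struct node **head)
    {
            struct node **link = head;
            struct node *cur, *n;

            for (cur = *head; cur; cur = n) {
                    n = cur->next;       /* cache next before any free() */
                    if (cur->expired) {
                            *link = n;   /* unlink... */
                            free(cur);   /* ...then release */
                            continue;
                    }
                    link = &cur->next;
            }
    }
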
@@ -2104,6 +2078,12 @@ static void __io_req_complete_post(struct io_kiocb *req, s32 res,
                        }
                }
                io_req_put_rsrc(req, ctx);
+               /*
+                * Selected buffer deallocation in io_clean_op() assumes that
+                * we don't hold ->completion_lock. Clean them here to avoid
+                * deadlocks.
+                */
+               io_put_kbuf_comp(req);
                io_dismantle_req(req);
                io_put_task(req->task, 1);
                wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
@@ -2148,7 +2128,7 @@ static inline void io_req_complete(struct io_kiocb *req, s32 res)
 static void io_req_complete_failed(struct io_kiocb *req, s32 res)
 {
        req_set_fail(req);
-       io_req_complete_post(req, res, io_put_kbuf(req, 0));
+       io_req_complete_post(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
 }
 
 static void io_req_complete_fail_submit(struct io_kiocb *req)
@@ -2437,6 +2417,8 @@ static void handle_prev_tw_list(struct io_wq_work_node *node,
                struct io_kiocb *req = container_of(node, struct io_kiocb,
                                                    io_task_work.node);
 
+               prefetch(container_of(next, struct io_kiocb, io_task_work.node));
+
                if (req->ctx != *ctx) {
                        if (unlikely(!*uring_locked && *ctx))
                                ctx_commit_and_unlock(*ctx);
@@ -2469,6 +2451,8 @@ static void handle_tw_list(struct io_wq_work_node *node,
                struct io_kiocb *req = container_of(node, struct io_kiocb,
                                                    io_task_work.node);
 
+               prefetch(container_of(next, struct io_kiocb, io_task_work.node));
+
                if (req->ctx != *ctx) {
                        ctx_flush_and_put(*ctx, locked);
                        *ctx = req->ctx;
@@ -2532,6 +2516,8 @@ static void io_req_task_work_add(struct io_kiocb *req, bool priority)
 
        WARN_ON_ONCE(!tctx);
 
+       io_drop_inflight_file(req);
+
        spin_lock_irqsave(&tctx->task_lock, flags);
        if (priority)
                wq_list_add_tail(&req->io_task_work.node, &tctx->prior_task_list);
@@ -2974,8 +2960,12 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
 
 static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 {
-       if (req->rw.kiocb.ki_flags & IOCB_WRITE)
+       if (req->rw.kiocb.ki_flags & IOCB_WRITE) {
                kiocb_end_write(req);
+               fsnotify_modify(req->file);
+       } else {
+               fsnotify_access(req->file);
+       }
        if (unlikely(res != req->result)) {
                if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
                    io_rw_should_reissue(req)) {
@@ -3152,42 +3142,11 @@ static inline bool io_file_supports_nowait(struct io_kiocb *req)
 
 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       struct io_ring_ctx *ctx = req->ctx;
        struct kiocb *kiocb = &req->rw.kiocb;
-       struct file *file = req->file;
        unsigned ioprio;
        int ret;
 
-       if (!io_req_ffs_set(req))
-               req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
-
        kiocb->ki_pos = READ_ONCE(sqe->off);
-       kiocb->ki_flags = iocb_flags(file);
-       ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
-       if (unlikely(ret))
-               return ret;
-
-       /*
-        * If the file is marked O_NONBLOCK, still allow retry for it if it
-        * supports async. Otherwise it's impossible to use O_NONBLOCK files
-        * reliably. If not, or it IOCB_NOWAIT is set, don't retry.
-        */
-       if ((kiocb->ki_flags & IOCB_NOWAIT) ||
-           ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
-               req->flags |= REQ_F_NOWAIT;
-
-       if (ctx->flags & IORING_SETUP_IOPOLL) {
-               if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
-                       return -EOPNOTSUPP;
-
-               kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
-               kiocb->ki_complete = io_complete_rw_iopoll;
-               req->iopoll_completed = 0;
-       } else {
-               if (kiocb->ki_flags & IOCB_HIPRI)
-                       return -EINVAL;
-               kiocb->ki_complete = io_complete_rw;
-       }
 
        ioprio = READ_ONCE(sqe->ioprio);
        if (ioprio) {
@@ -3203,6 +3162,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        req->imu = NULL;
        req->rw.addr = READ_ONCE(sqe->addr);
        req->rw.len = READ_ONCE(sqe->len);
+       req->rw.flags = READ_ONCE(sqe->rw_flags);
        req->buf_index = READ_ONCE(sqe->buf_index);
        return 0;
 }
@@ -3231,19 +3191,18 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
 {
        struct kiocb *kiocb = &req->rw.kiocb;
-       bool is_stream = req->file->f_mode & FMODE_STREAM;
 
-       if (kiocb->ki_pos == -1) {
-               if (!is_stream) {
-                       req->flags |= REQ_F_CUR_POS;
-                       kiocb->ki_pos = req->file->f_pos;
-                       return &kiocb->ki_pos;
-               } else {
-                       kiocb->ki_pos = 0;
-                       return NULL;
-               }
+       if (kiocb->ki_pos != -1)
+               return &kiocb->ki_pos;
+
+       if (!(req->file->f_mode & FMODE_STREAM)) {
+               req->flags |= REQ_F_CUR_POS;
+               kiocb->ki_pos = req->file->f_pos;
+               return &kiocb->ki_pos;
        }
-       return is_stream ? NULL : &kiocb->ki_pos;
+
+       kiocb->ki_pos = 0;
+       return NULL;
 }
 
 static void kiocb_done(struct io_kiocb *req, ssize_t ret,
@@ -3333,7 +3292,8 @@ static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter
        return 0;
 }
 
-static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
+static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
+                          unsigned int issue_flags)
 {
        struct io_mapped_ubuf *imu = req->imu;
        u16 index, buf_index = req->buf_index;
@@ -3343,7 +3303,7 @@ static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
 
                if (unlikely(buf_index >= ctx->nr_user_bufs))
                        return -EFAULT;
-               io_req_set_rsrc_node(req, ctx);
+               io_req_set_rsrc_node(req, ctx, issue_flags);
                index = array_index_nospec(buf_index, ctx->nr_user_bufs);
                imu = READ_ONCE(ctx->user_bufs[index]);
                req->imu = imu;
@@ -3505,7 +3465,7 @@ static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req,
        ssize_t ret;
 
        if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
-               ret = io_import_fixed(req, rw, iter);
+               ret = io_import_fixed(req, rw, iter, issue_flags);
                if (ret)
                        return ERR_PTR(ret);
                return NULL;
@@ -3706,13 +3666,6 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
        return 0;
 }
 
-static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
-       if (unlikely(!(req->file->f_mode & FMODE_READ)))
-               return -EBADF;
-       return io_prep_rw(req, sqe);
-}
-
 /*
  * This is our waitqueue callback handler, registered through __folio_lock_async()
  * when we initially tried to do the IO with the iocb armed our waitqueue.
@@ -3800,6 +3753,49 @@ static bool need_read_all(struct io_kiocb *req)
                S_ISBLK(file_inode(req->file)->i_mode);
 }
 
+static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
+{
+       struct kiocb *kiocb = &req->rw.kiocb;
+       struct io_ring_ctx *ctx = req->ctx;
+       struct file *file = req->file;
+       int ret;
+
+       if (unlikely(!file || !(file->f_mode & mode)))
+               return -EBADF;
+
+       if (!io_req_ffs_set(req))
+               req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
+
+       kiocb->ki_flags = iocb_flags(file);
+       ret = kiocb_set_rw_flags(kiocb, req->rw.flags);
+       if (unlikely(ret))
+               return ret;
+
+       /*
+        * If the file is marked O_NONBLOCK, still allow retry for it if it
+        * supports async. Otherwise it's impossible to use O_NONBLOCK files
+        * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
+        */
+       if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+           ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
+               req->flags |= REQ_F_NOWAIT;
+
+       if (ctx->flags & IORING_SETUP_IOPOLL) {
+               if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
+                       return -EOPNOTSUPP;
+
+               kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
+               kiocb->ki_complete = io_complete_rw_iopoll;
+               req->iopoll_completed = 0;
+       } else {
+               if (kiocb->ki_flags & IOCB_HIPRI)
+                       return -EINVAL;
+               kiocb->ki_complete = io_complete_rw;
+       }
+
+       return 0;
+}
+
 static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_rw_state __s, *s = &__s;
@@ -3835,6 +3831,9 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                iov_iter_restore(&s->iter, &s->iter_state);
                iovec = NULL;
        }
+       ret = io_rw_init_file(req, FMODE_READ);
+       if (unlikely(ret))
+               return ret;
        req->result = iov_iter_count(&s->iter);
 
        if (force_nonblock) {
@@ -3938,13 +3937,6 @@ out_free:
        return 0;
 }
 
-static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
-       if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
-               return -EBADF;
-       return io_prep_rw(req, sqe);
-}
-
 static int io_write(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_rw_state __s, *s = &__s;
@@ -3965,6 +3957,9 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
                iov_iter_restore(&s->iter, &s->iter_state);
                iovec = NULL;
        }
+       ret = io_rw_init_file(req, FMODE_WRITE);
+       if (unlikely(ret))
+               return ret;
        req->result = iov_iter_count(&s->iter);
 
        if (force_nonblock) {
@@ -4335,18 +4330,11 @@ static int __io_splice_prep(struct io_kiocb *req,
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
 
-       sp->file_in = NULL;
        sp->len = READ_ONCE(sqe->len);
        sp->flags = READ_ONCE(sqe->splice_flags);
-
        if (unlikely(sp->flags & ~valid_flags))
                return -EINVAL;
-
-       sp->file_in = io_file_get(req->ctx, req, READ_ONCE(sqe->splice_fd_in),
-                                 (sp->flags & SPLICE_F_FD_IN_FIXED));
-       if (!sp->file_in)
-               return -EBADF;
-       req->flags |= REQ_F_NEED_CLEANUP;
+       sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
        return 0;
 }
 
@@ -4361,20 +4349,29 @@ static int io_tee_prep(struct io_kiocb *req,
 static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_splice *sp = &req->splice;
-       struct file *in = sp->file_in;
        struct file *out = sp->file_out;
        unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
+       struct file *in;
        long ret = 0;
 
        if (issue_flags & IO_URING_F_NONBLOCK)
                return -EAGAIN;
+
+       if (sp->flags & SPLICE_F_FD_IN_FIXED)
+               in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
+       else
+               in = io_file_get_normal(req, sp->splice_fd_in);
+       if (!in) {
+               ret = -EBADF;
+               goto done;
+       }
+
        if (sp->len)
                ret = do_tee(in, out, sp->len, flags);
 
        if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
                io_put_file(in);
-       req->flags &= ~REQ_F_NEED_CLEANUP;
-
+done:
        if (ret != sp->len)
                req_set_fail(req);
        io_req_complete(req, ret);
@@ -4393,15 +4390,24 @@ static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_splice *sp = &req->splice;
-       struct file *in = sp->file_in;
        struct file *out = sp->file_out;
        unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
        loff_t *poff_in, *poff_out;
+       struct file *in;
        long ret = 0;
 
        if (issue_flags & IO_URING_F_NONBLOCK)
                return -EAGAIN;
 
+       if (sp->flags & SPLICE_F_FD_IN_FIXED)
+               in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
+       else
+               in = io_file_get_normal(req, sp->splice_fd_in);
+       if (!in) {
+               ret = -EBADF;
+               goto done;
+       }
+
        poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
        poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
 
@@ -4410,8 +4416,7 @@ static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
 
        if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
                io_put_file(in);
-       req->flags &= ~REQ_F_NEED_CLEANUP;
-
+done:
        if (ret != sp->len)
                req_set_fail(req);
        io_req_complete(req, ret);
@@ -4439,9 +4444,6 @@ static int io_msg_ring_prep(struct io_kiocb *req,
                     sqe->splice_fd_in || sqe->buf_index || sqe->personality))
                return -EINVAL;
 
-       if (req->file->f_op != &io_uring_fops)
-               return -EBADFD;
-
        req->msg.user_data = READ_ONCE(sqe->off);
        req->msg.len = READ_ONCE(sqe->len);
        return 0;
@@ -4451,14 +4453,18 @@ static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_ring_ctx *target_ctx;
        struct io_msg *msg = &req->msg;
-       int ret = -EOVERFLOW;
        bool filled;
+       int ret;
+
+       ret = -EBADFD;
+       if (req->file->f_op != &io_uring_fops)
+               goto done;
 
+       ret = -EOVERFLOW;
        target_ctx = req->file->private_data;
 
        spin_lock(&target_ctx->completion_lock);
-       filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len,
-                                       IORING_CQE_F_MSG);
+       filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len, 0);
        io_commit_cqring(target_ctx);
        spin_unlock(&target_ctx->completion_lock);
 
@@ -4467,6 +4473,9 @@ static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
                ret = 0;
        }
 
+done:
+       if (ret < 0)
+               req_set_fail(req);
        __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 }
@@ -4475,9 +4484,6 @@ static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_ring_ctx *ctx = req->ctx;
 
-       if (!req->file)
-               return -EBADF;
-
        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
@@ -4537,6 +4543,8 @@ static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
                                req->sync.len);
        if (ret < 0)
                req_set_fail(req);
+       else
+               fsnotify_modify(req->file);
        io_req_complete(req, ret);
        return 0;
 }
@@ -5419,12 +5427,21 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (req->ctx->compat)
                sr->msg_flags |= MSG_CMSG_COMPAT;
 #endif
+       sr->done_io = 0;
        return 0;
 }
 
+static bool io_net_retry(struct socket *sock, int flags)
+{
+       if (!(flags & MSG_WAITALL))
+               return false;
+       return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
+}
+
 static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_async_msghdr iomsg, *kmsg;
+       struct io_sr_msg *sr = &req->sr_msg;
        struct socket *sock;
        struct io_buffer *kbuf;
        unsigned flags;
@@ -5467,6 +5484,11 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
                        return io_setup_async_msg(req, kmsg);
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
+               if (ret > 0 && io_net_retry(sock, flags)) {
+                       sr->done_io += ret;
+                       req->flags |= REQ_F_PARTIAL_IO;
+                       return io_setup_async_msg(req, kmsg);
+               }
                req_set_fail(req);
        } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
                req_set_fail(req);
@@ -5476,6 +5498,10 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
        if (kmsg->free_iov)
                kfree(kmsg->free_iov);
        req->flags &= ~REQ_F_NEED_CLEANUP;
+       if (ret >= 0)
+               ret += sr->done_io;
+       else if (sr->done_io)
+               ret = sr->done_io;
        __io_req_complete(req, issue_flags, ret, io_put_kbuf(req, issue_flags));
        return 0;
 }
@@ -5526,12 +5552,23 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
                        return -EAGAIN;
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
+               if (ret > 0 && io_net_retry(sock, flags)) {
+                       sr->len -= ret;
+                       sr->buf += ret;
+                       sr->done_io += ret;
+                       req->flags |= REQ_F_PARTIAL_IO;
+                       return -EAGAIN;
+               }
                req_set_fail(req);
        } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
 out_free:
                req_set_fail(req);
        }
 
+       if (ret >= 0)
+               ret += sr->done_io;
+       else if (sr->done_io)
+               ret = sr->done_io;
        __io_req_complete(req, issue_flags, ret, io_put_kbuf(req, issue_flags));
        return 0;
 }
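
Both receive paths above now bank partial progress in sr->done_io, mark the request REQ_F_PARTIAL_IO, and re-arm instead of failing, but only when io_net_retry() says the socket can meaningfully resume (MSG_WAITALL on a stream or seqpacket socket). A plain-socket sketch of the accumulate-and-retry behavior:

    #include <errno.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    /* Keep calling recv() until 'len' bytes arrive, the peer closes, or a
     * hard error occurs; partial progress accumulates like sr->done_io. */
    static ssize_t recv_full(int fd, char *buf, size_t len)
    {
            size_t done_io = 0;

            while (done_io < len) {
                    ssize_t ret = recv(fd, buf + done_io, len - done_io, 0);

                    if (ret == 0)
                            break;            /* peer closed */
                    if (ret < 0) {
                            if (errno == EINTR)
                                    continue; /* transient, retry */
                            /* report progress if any, else the error */
                            return done_io ? (ssize_t)done_io : -1;
                    }
                    done_io += (size_t)ret;
            }
            return (ssize_t)done_io;
    }
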
@@ -5569,9 +5606,6 @@ static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
        struct file *file;
        int ret, fd;
 
-       if (req->file->f_flags & O_NONBLOCK)
-               req->flags |= REQ_F_NOWAIT;
-
        if (!fixed) {
                fd = __get_unused_fd_flags(accept->flags, accept->nofile);
                if (unlikely(fd < 0))
@@ -5691,108 +5725,6 @@ IO_NETOP_FN(send);
 IO_NETOP_FN(recv);
 #endif /* CONFIG_NET */
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-
-#define NAPI_TIMEOUT                   (60 * SEC_CONVERSION)
-
-struct napi_entry {
-       struct list_head        list;
-       unsigned int            napi_id;
-       unsigned long           timeout;
-};
-
-/*
- * Add busy poll NAPI ID from sk.
- */
-static void io_add_napi(struct file *file, struct io_ring_ctx *ctx)
-{
-       unsigned int napi_id;
-       struct socket *sock;
-       struct sock *sk;
-       struct napi_entry *ne;
-
-       if (!net_busy_loop_on())
-               return;
-
-       sock = sock_from_file(file);
-       if (!sock)
-               return;
-
-       sk = sock->sk;
-       if (!sk)
-               return;
-
-       napi_id = READ_ONCE(sk->sk_napi_id);
-
-       /* Non-NAPI IDs can be rejected */
-       if (napi_id < MIN_NAPI_ID)
-               return;
-
-       spin_lock(&ctx->napi_lock);
-       list_for_each_entry(ne, &ctx->napi_list, list) {
-               if (ne->napi_id == napi_id) {
-                       ne->timeout = jiffies + NAPI_TIMEOUT;
-                       goto out;
-               }
-       }
-
-       ne = kmalloc(sizeof(*ne), GFP_NOWAIT);
-       if (!ne)
-               goto out;
-
-       ne->napi_id = napi_id;
-       ne->timeout = jiffies + NAPI_TIMEOUT;
-       list_add_tail(&ne->list, &ctx->napi_list);
-out:
-       spin_unlock(&ctx->napi_lock);
-}
-
-static inline void io_check_napi_entry_timeout(struct napi_entry *ne)
-{
-       if (time_after(jiffies, ne->timeout)) {
-               list_del(&ne->list);
-               kfree(ne);
-       }
-}
-
-/*
- * Busy poll if globally on and supporting sockets found
- */
-static bool io_napi_busy_loop(struct list_head *napi_list)
-{
-       struct napi_entry *ne, *n;
-
-       list_for_each_entry_safe(ne, n, napi_list, list) {
-               napi_busy_loop(ne->napi_id, NULL, NULL, true,
-                              BUSY_POLL_BUDGET);
-               io_check_napi_entry_timeout(ne);
-       }
-       return !list_empty(napi_list);
-}
-
-static void io_free_napi_list(struct io_ring_ctx *ctx)
-{
-       spin_lock(&ctx->napi_lock);
-       while (!list_empty(&ctx->napi_list)) {
-               struct napi_entry *ne =
-                       list_first_entry(&ctx->napi_list, struct napi_entry,
-                                        list);
-
-               list_del(&ne->list);
-               kfree(ne);
-       }
-       spin_unlock(&ctx->napi_lock);
-}
-#else
-static inline void io_add_napi(struct file *file, struct io_ring_ctx *ctx)
-{
-}
-
-static inline void io_free_napi_list(struct io_ring_ctx *ctx)
-{
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 struct io_poll_table {
        struct poll_table_struct pt;
        struct io_kiocb *req;
@@ -5801,7 +5733,7 @@ struct io_poll_table {
 };
 
 #define IO_POLL_CANCEL_FLAG    BIT(31)
-#define IO_POLL_REF_MASK       ((1u << 20)-1)
+#define IO_POLL_REF_MASK       GENMASK(30, 0)
 
 /*
  * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
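
Widening IO_POLL_REF_MASK from a 20-bit constant to GENMASK(30, 0) makes the layout of poll_refs explicit: the low 31 bits count references and bit 31 is reserved for IO_POLL_CANCEL_FLAG. A userspace sketch with an equivalent mask macro (GENMASK_U32 is a local stand-in for the kernel's GENMASK):

    #include <stdint.h>

    /* bits h..l set, everything else clear — same contract as GENMASK() */
    #define GENMASK_U32(h, l) \
            ((~UINT32_C(0) << (l)) & (~UINT32_C(0) >> (31 - (h))))

    #define POLL_CANCEL_FLAG (UINT32_C(1) << 31)  /* top bit: cancel */
    #define POLL_REF_MASK    GENMASK_U32(30, 0)   /* low 31 bits: refs */

    /* (refs & POLL_REF_MASK) counts owners; the cancel flag survives
     * increments and decrements because it sits outside the mask. */
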
@@ -5906,10 +5838,9 @@ static void io_poll_remove_entries(struct io_kiocb *req)
  * either spurious wakeup or multishot CQE is served. 0 when it's done with
  * the request, then the mask is stored in req->result.
  */
-static int io_poll_check_events(struct io_kiocb *req)
+static int io_poll_check_events(struct io_kiocb *req, bool locked)
 {
        struct io_ring_ctx *ctx = req->ctx;
-       struct io_poll_iocb *poll = io_poll_get_single(req);
        int v;
 
        /* req->task == current here, checking PF_EXITING is safe */
@@ -5926,14 +5857,17 @@ static int io_poll_check_events(struct io_kiocb *req)
                        return -ECANCELED;
 
                if (!req->result) {
-                       struct poll_table_struct pt = { ._key = req->cflags };
+                       struct poll_table_struct pt = { ._key = req->apoll_events };
+                       unsigned flags = locked ? 0 : IO_URING_F_UNLOCKED;
 
-                       req->result = vfs_poll(req->file, &pt) & req->cflags;
+                       if (unlikely(!io_assign_file(req, flags)))
+                               return -EBADF;
+                       req->result = vfs_poll(req->file, &pt) & req->apoll_events;
                }
 
                /* multishot, just fill a CQE and proceed */
-               if (req->result && !(req->cflags & EPOLLONESHOT)) {
-                       __poll_t mask = mangle_poll(req->result & poll->events);
+               if (req->result && !(req->apoll_events & EPOLLONESHOT)) {
+                       __poll_t mask = mangle_poll(req->result & req->apoll_events);
                        bool filled;
 
                        spin_lock(&ctx->completion_lock);
@@ -5944,7 +5878,6 @@ static int io_poll_check_events(struct io_kiocb *req)
                        if (unlikely(!filled))
                                return -ECANCELED;
                        io_cqring_ev_posted(ctx);
-                       io_add_napi(req->file, ctx);
                } else if (req->result) {
                        return 0;
                }
@@ -5963,7 +5896,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
 
-       ret = io_poll_check_events(req);
+       ret = io_poll_check_events(req, *locked);
        if (ret > 0)
                return;
 
@@ -5988,7 +5921,7 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
 
-       ret = io_poll_check_events(req);
+       ret = io_poll_check_events(req, *locked);
        if (ret > 0)
                return;
 
@@ -6012,7 +5945,7 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, int events)
         * CPU. We want to avoid pulling in req->apoll->events for that
         * case.
         */
-       req->cflags = events;
+       req->apoll_events = events;
        if (req->opcode == IORING_OP_POLL_ADD)
                req->io_task_work.func = io_poll_task_func;
        else
@@ -6035,10 +5968,13 @@ static void io_poll_cancel_req(struct io_kiocb *req)
        io_poll_execute(req, 0, 0);
 }
 
+#define wqe_to_req(wait)       ((void *)((unsigned long) (wait)->private & ~1))
+#define wqe_is_double(wait)    ((unsigned long) (wait)->private & 1)
+
 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                        void *key)
 {
-       struct io_kiocb *req = wait->private;
+       struct io_kiocb *req = wqe_to_req(wait);
        struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
                                                 wait);
        __poll_t mask = key_to_poll(key);
@@ -6076,7 +6012,10 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                if (mask && poll->events & EPOLLONESHOT) {
                        list_del_init(&poll->wait.entry);
                        poll->head = NULL;
-                       req->flags &= ~REQ_F_SINGLE_POLL;
+                       if (wqe_is_double(wait))
+                               req->flags &= ~REQ_F_DOUBLE_POLL;
+                       else
+                               req->flags &= ~REQ_F_SINGLE_POLL;
                }
                __io_poll_execute(req, mask, poll->events);
        }
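
wqe_to_req() and wqe_is_double() above smuggle one bit of state through wait->private by tagging the low bit of the request pointer; that is safe because an io_kiocb is aligned to more than two bytes, so bit 0 of any valid pointer to one is always zero. A sketch of low-bit pointer tagging with an illustrative type:

    #include <assert.h>
    #include <stdint.h>

    struct req { int dummy; };  /* any type aligned to more than 1 byte */

    static_assert(_Alignof(struct req) > 1, "low bit must be free for a tag");

    static void *tag_ptr(struct req *r, int is_double)
    {
            return (void *)((uintptr_t)r | (is_double ? 1u : 0u));
    }

    static struct req *untag_ptr(void *p)  /* wqe_to_req() analogue */
    {
            return (struct req *)((uintptr_t)p & ~(uintptr_t)1);
    }

    static int tag_of(void *p)             /* wqe_is_double() analogue */
    {
            return (int)((uintptr_t)p & 1);
    }
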
@@ -6088,6 +6027,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
                            struct io_poll_iocb **poll_ptr)
 {
        struct io_kiocb *req = pt->req;
+       unsigned long wqe_private = (unsigned long) req;
 
        /*
         * The file being polled uses multiple waitqueues for poll handling
@@ -6113,6 +6053,8 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
                        pt->error = -ENOMEM;
                        return;
                }
+               /* mark as double wq entry */
+               wqe_private |= 1;
                req->flags |= REQ_F_DOUBLE_POLL;
                io_init_poll_iocb(poll, first->events, first->wait.func);
                *poll_ptr = poll;
@@ -6123,7 +6065,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
        req->flags |= REQ_F_SINGLE_POLL;
        pt->nr_entries++;
        poll->head = head;
-       poll->wait.private = req;
+       poll->wait.private = (void *) wqe_private;
 
        if (poll->events & EPOLLEXCLUSIVE)
                add_wait_queue_exclusive(head, &poll->wait);
@@ -6150,7 +6092,6 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
        INIT_HLIST_NODE(&req->hash_node);
        io_init_poll_iocb(poll, mask, io_poll_wake);
        poll->file = req->file;
-       poll->wait.private = req;
 
        ipt->pt._key = mask;
        ipt->req = req;
@@ -6187,7 +6128,6 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
                __io_poll_execute(req, mask, poll->events);
                return 0;
        }
-       io_add_napi(req->file, req->ctx);
 
        /*
         * Release ownership. If someone tried to queue a tw while it was
@@ -6238,7 +6178,8 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
        } else {
                mask |= POLLOUT | POLLWRNORM;
        }
-
+       if (def->poll_exclusive)
+               mask |= EPOLLEXCLUSIVE;
        if (!(issue_flags & IO_URING_F_UNLOCKED) &&
            !list_empty(&ctx->apoll_cache)) {
                apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
@@ -6254,6 +6195,8 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
        req->flags |= REQ_F_POLLED;
        ipt.pt._qproc = io_async_queue_proc;
 
+       io_kbuf_recycle(req, issue_flags);
+
        ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
        if (ret || ipt.error)
                return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
@@ -6281,6 +6224,7 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
                list = &ctx->cancel_hash[i];
                hlist_for_each_entry_safe(req, tmp, list, hash_node) {
                        if (io_match_task_safe(req, tsk, cancel_all)) {
+                               hlist_del_init(&req->hash_node);
                                io_poll_cancel_req(req);
                                found = true;
                        }
@@ -6393,7 +6337,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
                return -EINVAL;
 
        io_req_set_refcount(req);
-       req->cflags = poll->events = io_poll_parse_events(sqe, flags);
+       req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
        return 0;
 }
 
@@ -6688,6 +6632,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
        if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
                return -EINVAL;
 
+       INIT_LIST_HEAD(&req->timeout.list);
        data->mode = io_translate_timeout_mode(flags);
        hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
 
@@ -6894,6 +6839,7 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
        up.nr = 0;
        up.tags = 0;
        up.resv = 0;
+       up.resv2 = 0;
 
        io_ring_submit_lock(ctx, needs_lock);
        ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
@@ -6914,11 +6860,10 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        case IORING_OP_READV:
        case IORING_OP_READ_FIXED:
        case IORING_OP_READ:
-               return io_read_prep(req, sqe);
        case IORING_OP_WRITEV:
        case IORING_OP_WRITE_FIXED:
        case IORING_OP_WRITE:
-               return io_write_prep(req, sqe);
+               return io_prep_rw(req, sqe);
        case IORING_OP_POLL_ADD:
                return io_poll_add_prep(req, sqe);
        case IORING_OP_POLL_REMOVE:
@@ -7075,8 +7020,11 @@ fail:
 
 static void io_clean_op(struct io_kiocb *req)
 {
-       if (req->flags & REQ_F_BUFFER_SELECTED)
+       if (req->flags & REQ_F_BUFFER_SELECTED) {
+               spin_lock(&req->ctx->completion_lock);
                io_put_kbuf_comp(req);
+               spin_unlock(&req->ctx->completion_lock);
+       }
 
        if (req->flags & REQ_F_NEED_CLEANUP) {
                switch (req->opcode) {
@@ -7098,11 +7046,6 @@ static void io_clean_op(struct io_kiocb *req)
                        kfree(io->free_iov);
                        break;
                        }
-               case IORING_OP_SPLICE:
-               case IORING_OP_TEE:
-                       if (!(req->splice.flags & SPLICE_F_FD_IN_FIXED))
-                               io_put_file(req->splice.file_in);
-                       break;
                case IORING_OP_OPENAT:
                case IORING_OP_OPENAT2:
                        if (req->open.filename)
@@ -7137,11 +7080,6 @@ static void io_clean_op(struct io_kiocb *req)
                kfree(req->apoll);
                req->apoll = NULL;
        }
-       if (req->flags & REQ_F_INFLIGHT) {
-               struct io_uring_task *tctx = req->task->io_uring;
-
-               atomic_dec(&tctx->inflight_tracked);
-       }
        if (req->flags & REQ_F_CREDS)
                put_cred(req->creds);
        if (req->flags & REQ_F_ASYNC_DATA) {
@@ -7151,11 +7089,31 @@ static void io_clean_op(struct io_kiocb *req)
        req->flags &= ~IO_REQ_CLEAN_FLAGS;
 }
 
+static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
+{
+       if (req->file || !io_op_defs[req->opcode].needs_file)
+               return true;
+
+       if (req->flags & REQ_F_FIXED_FILE)
+               req->file = io_file_get_fixed(req, req->fd, issue_flags);
+       else
+               req->file = io_file_get_normal(req, req->fd);
+       if (req->file)
+               return true;
+
+       req_set_fail(req);
+       req->result = -EBADF;
+       return false;
+}
+
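
io_assign_file() is the issue-time half of the new late-binding scheme: submission only records sqe->fd (see the io_init_req hunk below), and the fd is resolved to a file when the request actually runs, at which point it is known whether uring_lock is held for the fixed-file table. A compact sketch of the two-phase shape; the lookup helpers are placeholders:

    #include <stdbool.h>
    #include <stddef.h>

    struct file;                     /* opaque here */

    struct request {
            struct file *file;       /* NULL until issue time */
            int fd;                  /* recorded at submission */
            bool fixed;              /* fixed-table slot vs. normal fd */
    };

    static struct file *lookup_fixed(int fd, bool locked);  /* table slot */
    static struct file *lookup_normal(int fd);              /* fget-like */

    static bool assign_file(struct request *req, bool locked)
    {
            if (req->file)
                    return true;     /* already resolved */

            req->file = req->fixed ? lookup_fixed(req->fd, locked)
                                   : lookup_normal(req->fd);
            return req->file != NULL;  /* caller fails with -EBADF */
    }
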
 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 {
        const struct cred *creds = NULL;
        int ret;
 
+       if (unlikely(!io_assign_file(req, issue_flags)))
+               return -EBADF;
+
        if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
                creds = override_creds(req->creds);
 
@@ -7305,10 +7263,11 @@ static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
 static void io_wq_submit_work(struct io_wq_work *work)
 {
        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+       const struct io_op_def *def = &io_op_defs[req->opcode];
        unsigned int issue_flags = IO_URING_F_UNLOCKED;
        bool needs_poll = false;
        struct io_kiocb *timeout;
-       int ret = 0;
+       int ret = 0, err = -ECANCELED;
 
        /* one will be dropped by ->io_free_work() after returning to io-wq */
        if (!(req->flags & REQ_F_REFCOUNT))
@@ -7320,14 +7279,20 @@ static void io_wq_submit_work(struct io_wq_work *work)
        if (timeout)
                io_queue_linked_timeout(timeout);
 
+
        /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
        if (work->flags & IO_WQ_WORK_CANCEL) {
-               io_req_task_queue_fail(req, -ECANCELED);
+fail:
+               io_req_task_queue_fail(req, err);
                return;
        }
+       if (!io_assign_file(req, issue_flags)) {
+               err = -EBADF;
+               work->flags |= IO_WQ_WORK_CANCEL;
+               goto fail;
+       }
 
        if (req->flags & REQ_F_FORCE_ASYNC) {
-               const struct io_op_def *def = &io_op_defs[req->opcode];
                bool opcode_poll = def->pollin || def->pollout;
 
                if (opcode_poll && file_can_poll(req->file)) {
@@ -7384,46 +7349,56 @@ static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file
        file_slot->file_ptr = file_ptr;
 }
 
-static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx,
-                                            struct io_kiocb *req, int fd)
+static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
+                                            unsigned int issue_flags)
 {
-       struct file *file;
+       struct io_ring_ctx *ctx = req->ctx;
+       struct file *file = NULL;
        unsigned long file_ptr;
 
+       if (issue_flags & IO_URING_F_UNLOCKED)
+               mutex_lock(&ctx->uring_lock);
+
        if (unlikely((unsigned int)fd >= ctx->nr_user_files))
-               return NULL;
+               goto out;
        fd = array_index_nospec(fd, ctx->nr_user_files);
        file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
        file = (struct file *) (file_ptr & FFS_MASK);
        file_ptr &= ~FFS_MASK;
        /* mask in overlapping REQ_F and FFS bits */
        req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
-       io_req_set_rsrc_node(req, ctx);
+       io_req_set_rsrc_node(req, ctx, 0);
+out:
+       if (issue_flags & IO_URING_F_UNLOCKED)
+               mutex_unlock(&ctx->uring_lock);
        return file;
 }
 
-static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
-                                      struct io_kiocb *req, int fd)
+/*
+ * Drop the file for requeue operations. Only used if req->file is the
+ * io_uring descriptor itself.
+ */
+static void io_drop_inflight_file(struct io_kiocb *req)
+{
+       if (unlikely(req->flags & REQ_F_INFLIGHT)) {
+               fput(req->file);
+               req->file = NULL;
+               req->flags &= ~REQ_F_INFLIGHT;
+       }
+}
+
+static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
 {
        struct file *file = fget(fd);
 
-       trace_io_uring_file_get(ctx, req, req->user_data, fd);
+       trace_io_uring_file_get(req->ctx, req, req->user_data, fd);
 
        /* we don't allow fixed io_uring files */
-       if (file && unlikely(file->f_op == &io_uring_fops))
-               io_req_track_inflight(req);
+       if (file && file->f_op == &io_uring_fops)
+               req->flags |= REQ_F_INFLIGHT;
        return file;
 }
 
-static inline struct file *io_file_get(struct io_ring_ctx *ctx,
-                                      struct io_kiocb *req, int fd, bool fixed)
-{
-       if (fixed)
-               return io_file_get_fixed(ctx, req, fd);
-       else
-               return io_file_get_normal(ctx, req, fd);
-}
-
 static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 {
        struct io_kiocb *prev = req->timeout.prev;
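io_file_get_fixed() can now be reached from io-wq context where ctx->uring_lock is not held, so it takes the lock itself only when issue_flags carries IO_URING_F_UNLOCKED. A hedged pthreads sketch of that caller-encodes-locking-state pattern (names are illustrative):

#include <pthread.h>
#include <stddef.h>

#define F_UNLOCKED 0x1                  /* caller does NOT hold the lock */

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *table[16];

static void *get_fixed(unsigned int idx, unsigned int flags)
{
        void *entry = NULL;

        if (flags & F_UNLOCKED)
                pthread_mutex_lock(&table_lock);

        if (idx < 16)
                entry = table[idx];

        if (flags & F_UNLOCKED)
                pthread_mutex_unlock(&table_lock);
        return entry;
}

Callers that already hold the lock pass flags without F_UNLOCKED and the helper becomes a plain table read, which is the same trade the hunk above makes.
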
@@ -7505,11 +7480,9 @@ static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
                 * Queued up for async execution, worker will release
                 * submit reference when the iocb is actually submitted.
                 */
-               io_kbuf_recycle(req);
                io_queue_async_work(req, NULL);
                break;
        case IO_APOLL_OK:
-               io_kbuf_recycle(req);
                break;
        }
 
@@ -7665,6 +7638,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
        if (io_op_defs[opcode].needs_file) {
                struct io_submit_state *state = &ctx->submit_state;
 
+               req->fd = READ_ONCE(sqe->fd);
+
                /*
                 * Plug now if we have more than 2 IO left after this, and the
                 * target is potentially a read/write to block based storage.
@@ -7674,11 +7649,6 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
                        state->need_plug = false;
                        blk_start_plug_nr_ios(&state->plug, state->submit_nr);
                }
-
-               req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd),
-                                       (sqe_flags & IOSQE_FIXED_FILE));
-               if (unlikely(!req->file))
-                       return -EBADF;
        }
 
        personality = READ_ONCE(sqe->personality);
@@ -7953,13 +7923,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
                    !(ctx->flags & IORING_SETUP_R_DISABLED))
                        ret = io_submit_sqes(ctx, to_submit);
                mutex_unlock(&ctx->uring_lock);
-#ifdef CONFIG_NET_RX_BUSY_POLL
-               spin_lock(&ctx->napi_lock);
-               if (!list_empty(&ctx->napi_list) &&
-                   io_napi_busy_loop(&ctx->napi_list))
-                       ++ret;
-               spin_unlock(&ctx->napi_lock);
-#endif
+
                if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
                        wake_up(&ctx->sqo_sq_wait);
                if (creds)
@@ -8053,6 +8017,13 @@ static int io_sq_thread(void *data)
                                        needs_sched = false;
                                        break;
                                }
+
+                               /*
+                                * Ensure the store of the wakeup flag is not
+                                * reordered with the load of the SQ tail
+                                */
+                               smp_mb();
+
                                if (io_sqring_entries(ctx)) {
                                        needs_sched = false;
                                        break;
@@ -8090,9 +8061,6 @@ struct io_wait_queue {
        struct io_ring_ctx *ctx;
        unsigned cq_tail;
        unsigned nr_timeouts;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       unsigned busy_poll_to;
-#endif
 };
 
 static inline bool io_should_wake(struct io_wait_queue *iowq)
@@ -8154,87 +8122,6 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
        return 1;
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static void io_adjust_busy_loop_timeout(struct timespec64 *ts,
-                                       struct io_wait_queue *iowq)
-{
-       unsigned busy_poll_to = READ_ONCE(sysctl_net_busy_poll);
-       struct timespec64 pollto = ns_to_timespec64(1000 * (s64)busy_poll_to);
-
-       if (timespec64_compare(ts, &pollto) > 0) {
-               *ts = timespec64_sub(*ts, pollto);
-               iowq->busy_poll_to = busy_poll_to;
-       } else {
-               u64 to = timespec64_to_ns(ts);
-
-               do_div(to, 1000);
-               iowq->busy_poll_to = to;
-               ts->tv_sec = 0;
-               ts->tv_nsec = 0;
-       }
-}
-
-static inline bool io_busy_loop_timeout(unsigned long start_time,
-                                       unsigned long bp_usec)
-{
-       if (bp_usec) {
-               unsigned long end_time = start_time + bp_usec;
-               unsigned long now = busy_loop_current_time();
-
-               return time_after(now, end_time);
-       }
-       return true;
-}
-
-static bool io_busy_loop_end(void *p, unsigned long start_time)
-{
-       struct io_wait_queue *iowq = p;
-
-       return signal_pending(current) ||
-              io_should_wake(iowq) ||
-              io_busy_loop_timeout(start_time, iowq->busy_poll_to);
-}
-
-static void io_blocking_napi_busy_loop(struct list_head *napi_list,
-                                      struct io_wait_queue *iowq)
-{
-       unsigned long start_time =
-               list_is_singular(napi_list) ? 0 :
-               busy_loop_current_time();
-
-       do {
-               if (list_is_singular(napi_list)) {
-                       struct napi_entry *ne =
-                               list_first_entry(napi_list,
-                                                struct napi_entry, list);
-
-                       napi_busy_loop(ne->napi_id, io_busy_loop_end, iowq,
-                                      true, BUSY_POLL_BUDGET);
-                       io_check_napi_entry_timeout(ne);
-                       break;
-               }
-       } while (io_napi_busy_loop(napi_list) &&
-                !io_busy_loop_end(iowq, start_time));
-}
-
-static void io_putback_napi_list(struct io_ring_ctx *ctx,
-                                struct list_head *napi_list)
-{
-       struct napi_entry *cne, *lne;
-
-       spin_lock(&ctx->napi_lock);
-       list_for_each_entry(cne, &ctx->napi_list, list)
-               list_for_each_entry(lne, napi_list, list)
-                       if (cne->napi_id == lne->napi_id) {
-                               list_del(&lne->list);
-                               kfree(lne);
-                               break;
-                       }
-       list_splice(napi_list, &ctx->napi_list);
-       spin_unlock(&ctx->napi_lock);
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 /*
  * Wait until events become available, if we don't already have some. The
  * application must reap them itself, as they reside on the shared cq ring.
@@ -8247,9 +8134,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
        struct io_rings *rings = ctx->rings;
        ktime_t timeout = KTIME_MAX;
        int ret;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       LIST_HEAD(local_napi_list);
-#endif
 
        do {
                io_cqring_overflow_flush(ctx);
@@ -8272,29 +8156,13 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                        return ret;
        }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       iowq.busy_poll_to = 0;
-       if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
-               spin_lock(&ctx->napi_lock);
-               list_splice_init(&ctx->napi_list, &local_napi_list);
-               spin_unlock(&ctx->napi_lock);
-       }
-#endif
        if (uts) {
                struct timespec64 ts;
 
                if (get_timespec64(&ts, uts))
                        return -EFAULT;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-               if (!list_empty(&local_napi_list))
-                       io_adjust_busy_loop_timeout(&ts, &iowq);
-#endif
                timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
        }
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       else if (!list_empty(&local_napi_list))
-               iowq.busy_poll_to = READ_ONCE(sysctl_net_busy_poll);
-#endif
 
        init_waitqueue_func_entry(&iowq.wq, io_wake_function);
        iowq.wq.private = current;
@@ -8304,12 +8172,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
        iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
 
        trace_io_uring_cqring_wait(ctx, min_events);
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       if (iowq.busy_poll_to)
-               io_blocking_napi_busy_loop(&local_napi_list, &iowq);
-       if (!list_empty(&local_napi_list))
-               io_putback_napi_list(ctx, &local_napi_list);
-#endif
        do {
                /* if we can't even flush overflow, don't wait for more */
                if (!io_cqring_overflow_flush(ctx)) {
@@ -8778,10 +8640,15 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
                refcount_add(skb->truesize, &sk->sk_wmem_alloc);
                skb_queue_head(&sk->sk_receive_queue, skb);
 
-               for (i = 0; i < nr_files; i++)
-                       fput(fpl->fp[i]);
+               for (i = 0; i < nr; i++) {
+                       struct file *file = io_file_from_index(ctx, i + offset);
+
+                       if (file)
+                               fput(file);
+               }
        } else {
                kfree_skb(skb);
+               free_uid(fpl->user);
                kfree(fpl);
        }
 
@@ -9069,13 +8936,15 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
 static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
                                 struct io_rsrc_node *node, void *rsrc)
 {
+       u64 *tag_slot = io_get_tag_slot(data, idx);
        struct io_rsrc_put *prsrc;
 
        prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
        if (!prsrc)
                return -ENOMEM;
 
-       prsrc->tag = *io_get_tag_slot(data, idx);
+       prsrc->tag = *tag_slot;
+       *tag_slot = 0;
        prsrc->rsrc = rsrc;
        list_add(&prsrc->list, &node->rsrc_list);
        return 0;
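io_queue_rsrc_removal() now consumes the tag: reading it and zeroing the slot in one place means a later update to the same slot cannot replay a stale tag. The take-and-clear idiom in isolation:

#include <stdint.h>

static uint64_t take_tag(uint64_t *slot)
{
        uint64_t tag = *slot;

        *slot = 0;              /* the slot can now be reused safely */
        return tag;
}
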
@@ -9144,7 +9013,7 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
        bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
        struct io_fixed_file *file_slot;
        struct file *file;
-       int ret, i;
+       int ret;
 
        io_ring_submit_lock(ctx, needs_lock);
        ret = -ENXIO;
@@ -9157,8 +9026,8 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
        if (ret)
                goto out;
 
-       i = array_index_nospec(offset, ctx->nr_user_files);
-       file_slot = io_fixed_file_slot(&ctx->file_table, i);
+       offset = array_index_nospec(offset, ctx->nr_user_files);
+       file_slot = io_fixed_file_slot(&ctx->file_table, offset);
        ret = -EBADF;
        if (!file_slot->file_ptr)
                goto out;
@@ -9214,8 +9083,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 
                if (file_slot->file_ptr) {
                        file = (struct file *)(file_slot->file_ptr & FFS_MASK);
-                       err = io_queue_rsrc_removal(data, up->offset + done,
-                                                   ctx->rsrc_node, file);
+                       err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
                        if (err)
                                break;
                        file_slot->file_ptr = 0;
@@ -9240,7 +9108,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                                err = -EBADF;
                                break;
                        }
-                       *io_get_tag_slot(data, up->offset + done) = tag;
+                       *io_get_tag_slot(data, i) = tag;
                        io_fixed_file_set(file_slot, file);
                        err = io_sqe_file_register(ctx, file, i);
                        if (err) {
@@ -9324,7 +9192,6 @@ static __cold int io_uring_alloc_task_context(struct task_struct *task,
        xa_init(&tctx->xa);
        init_waitqueue_head(&tctx->wait);
        atomic_set(&tctx->in_idle, 0);
-       atomic_set(&tctx->inflight_tracked, 0);
        task->io_uring = tctx;
        spin_lock_init(&tctx->task_lock);
        INIT_WQ_LIST(&tctx->task_list);
@@ -9899,7 +9766,7 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
 
                i = array_index_nospec(offset, ctx->nr_user_bufs);
                if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
-                       err = io_queue_rsrc_removal(ctx->buf_data, offset,
+                       err = io_queue_rsrc_removal(ctx->buf_data, i,
                                                    ctx->rsrc_node, ctx->user_bufs[i]);
                        if (unlikely(err)) {
                                io_buffer_unmap(ctx, &imu);
@@ -10094,7 +9961,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
        io_req_caches_free(ctx);
        if (ctx->hash_map)
                io_wq_put_hash(ctx->hash_map);
-       io_free_napi_list(ctx);
        kfree(ctx->cancel_hash);
        kfree(ctx->dummy_ubuf);
        kfree(ctx->io_buffers);
@@ -10517,7 +10383,7 @@ static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
 {
        if (tracked)
-               return atomic_read(&tctx->inflight_tracked);
+               return 0;
        return percpu_counter_sum(&tctx->inflight);
 }
 
@@ -10668,6 +10534,11 @@ static int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
                        break;
                }
 
+               if (reg.resv) {
+                       ret = -EINVAL;
+                       break;
+               }
+
                if (reg.offset == -1U) {
                        start = 0;
                        end = IO_RINGFD_REG_MAX;
@@ -10714,7 +10585,7 @@ static int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
                        ret = -EFAULT;
                        break;
                }
-               if (reg.offset >= IO_RINGFD_REG_MAX) {
+               if (reg.resv || reg.offset >= IO_RINGFD_REG_MAX) {
                        ret = -EINVAL;
                        break;
                }
@@ -10841,6 +10712,8 @@ static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz
                return -EINVAL;
        if (copy_from_user(&arg, argp, sizeof(arg)))
                return -EFAULT;
+       if (arg.pad)
+               return -EINVAL;
        *sig = u64_to_user_ptr(arg.sigmask);
        *argsz = arg.sigmask_sz;
        *ts = u64_to_user_ptr(arg.ts);
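Several hunks in this region add the same uAPI hygiene: reserved and pad fields copied from userspace must be rejected when nonzero, otherwise they can never be given a meaning later without silently breaking old binaries that left garbage there. A sketch of the pattern (the struct layout here is illustrative):

#include <errno.h>
#include <stdint.h>

struct ext_arg {                /* illustrative layout only */
        uint64_t sigmask;
        uint32_t sigmask_sz;
        uint32_t pad;
        uint64_t ts;
};

static int check_ext_arg(const struct ext_arg *arg)
{
        if (arg->pad)
                return -EINVAL; /* must be zero until a meaning exists */
        return 0;
}
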
@@ -11322,7 +11195,8 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
                        IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
                        IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
                        IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
-                       IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP;
+                       IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
+                       IORING_FEAT_LINKED_FILE;
 
        if (copy_to_user(params, p, sizeof(*p))) {
                ret = -EFAULT;
@@ -11533,8 +11407,6 @@ static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
        __u32 tmp;
        int err;
 
-       if (up->resv)
-               return -EINVAL;
        if (check_add_overflow(up->offset, nr_args, &tmp))
                return -EOVERFLOW;
        err = io_rsrc_node_switch_start(ctx);
@@ -11560,6 +11432,8 @@ static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
        memset(&up, 0, sizeof(up));
        if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
                return -EFAULT;
+       if (up.resv || up.resv2)
+               return -EINVAL;
        return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
 }
 
@@ -11572,7 +11446,7 @@ static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
                return -EINVAL;
        if (copy_from_user(&up, arg, sizeof(up)))
                return -EFAULT;
-       if (!up.nr || up.resv)
+       if (!up.nr || up.resv || up.resv2)
                return -EINVAL;
        return __io_register_rsrc_update(ctx, type, &up, up.nr);
 }
@@ -11620,7 +11494,15 @@ static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
        if (len > cpumask_size())
                len = cpumask_size();
 
-       if (copy_from_user(new_mask, arg, len)) {
+       if (in_compat_syscall()) {
+               ret = compat_get_bitmap(cpumask_bits(new_mask),
+                                       (const compat_ulong_t __user *)arg,
+                                       len * 8 /* CHAR_BIT */);
+       } else {
+               ret = copy_from_user(new_mask, arg, len);
+       }
+
+       if (ret) {
                free_cpumask_var(new_mask);
                return -EFAULT;
        }
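The in_compat_syscall() branch matters because a bitmap from 32-bit userspace is an array of 32-bit words; that only coincides with the native unsigned-long layout on little-endian, so a raw copy_from_user() mis-assembles the mask on 64-bit big-endian kernels. A hedged sketch of the reassembly compat_get_bitmap() performs, assuming 64-bit longs and a bit count that is a multiple of 32:

#include <stdint.h>

static void bitmap_from_u32s(unsigned long *dst, const uint32_t *src,
                             unsigned int nbits)
{
        unsigned int i;

        for (i = 0; i < nbits / 32; i++) {
                if (i % 2 == 0)
                        dst[i / 2] = src[i];                        /* low half  */
                else
                        dst[i / 2] |= (unsigned long)src[i] << 32;  /* high half */
        }
}
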
index 090bf47606aba5ec14ff02622e2d1f6c0b723bb2..80ac36aea913b8528d6698c8cb510424b35f3aee 100644 (file)
@@ -173,7 +173,7 @@ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
 
        if (*len == 0)
                return -EINVAL;
-       if (start > maxbytes)
+       if (start >= maxbytes)
                return -EFBIG;
 
        /*
index 49dccd9050f129823389ace9a88333b1d4799e1b..8ce8720093b9afc41187175422f73a4fbf250b8f 100644 (file)
@@ -435,18 +435,17 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
 {
        struct iomap_page *iop = to_iomap_page(folio);
        struct inode *inode = folio->mapping->host;
-       size_t len;
        unsigned first, last, i;
 
        if (!iop)
                return false;
 
-       /* Limit range to this folio */
-       len = min(folio_size(folio) - from, count);
+       /* Caller's range may extend past the end of this folio */
+       count = min(folio_size(folio) - from, count);
 
-       /* First and last blocks in range within page */
+       /* First and last blocks in range within folio */
        first = from >> inode->i_blkbits;
-       last = (from + len - 1) >> inode->i_blkbits;
+       last = (from + count - 1) >> inode->i_blkbits;
 
        for (i = first; i <= last; i++)
                if (!test_bit(i, iop->uptodate))
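A worked example of the first/last arithmetic above, runnable in plain C: with 512-byte blocks (i_blkbits == 9), a query at from=1536, count=2048 inside a 4 KiB folio must find blocks 3 through 6 uptodate.

#include <stdio.h>

int main(void)
{
        unsigned int blkbits = 9;               /* 512-byte blocks */
        unsigned long from = 1536, count = 2048;

        unsigned int first = from >> blkbits;               /* 3 */
        unsigned int last  = (from + count - 1) >> blkbits; /* 6 */

        printf("blocks %u..%u must be uptodate\n", first, last);
        return 0;
}
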
index b288c8ae1236b0f81f271e0fcfcaf5ee246b2933..837cd55fd4c5e534b0b0c802b1dc1669c9b7a5c2 100644 (file)
@@ -415,13 +415,15 @@ int jffs2_do_mount_fs(struct jffs2_sb_info *c)
                jffs2_free_ino_caches(c);
                jffs2_free_raw_node_refs(c);
                ret = -EIO;
-               goto out_free;
+               goto out_sum_exit;
        }
 
        jffs2_calc_trigger_levels(c);
 
        return 0;
 
+ out_sum_exit:
+       jffs2_sum_exit(c);
  out_free:
        kvfree(c->blocks);
 
index 2ac410477c4f46726be5081e0e0450c771f1a495..71f03a5d36ed257d4ebd12a49ef54a044e1444b6 100644 (file)
@@ -603,8 +603,8 @@ out_root:
        jffs2_free_ino_caches(c);
        jffs2_free_raw_node_refs(c);
        kvfree(c->blocks);
- out_inohash:
        jffs2_clear_xattr_subsystem(c);
+ out_inohash:
        kfree(c->inocache_list);
  out_wbuf:
        jffs2_flash_cleanup(c);
index 2e4a86763c07cd12b945518690768c4a0388de5f..93a2951538cea0cc0e67d27f7f3143c0fbd2cfe6 100644 (file)
 #include <linux/mutex.h>
 
 struct jffs2_inode_info {
-       /* We need an internal mutex similar to inode->i_mutex.
+       /* We need an internal mutex similar to inode->i_rwsem.
           Unfortunately, we can't use the existing one, because
           either the GC would deadlock, or we'd have to release it
           before letting GC proceed. Or we'd have to put ugliness
-          into the GC code so it didn't attempt to obtain the i_mutex
+          into the GC code so it didn't attempt to obtain the i_rwsem
           for the inode(s) which are already locked */
        struct mutex sem;
 
index b676056826beb69a05aca4ce7028720d2eeea5cf..29671e33a1714c8ca5c1e272ac58821e00078ef1 100644 (file)
@@ -136,7 +136,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
                if (!s) {
                        JFFS2_WARNING("Can't allocate memory for summary\n");
                        ret = -ENOMEM;
-                       goto out;
+                       goto out_buf;
                }
        }
 
@@ -275,13 +275,15 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
        }
        ret = 0;
  out:
+       jffs2_sum_reset_collected(s);
+       kfree(s);
+ out_buf:
        if (buf_size)
                kfree(flashbuf);
 #ifndef __ECOS
        else
                mtd_unpoint(c->mtd, 0, c->mtd->size);
 #endif
-       kfree(s);
        return ret;
 }
 
index 74067a73ff78636e61192360c23d8c0d89bbfdf7..88423069407c04ab0253d654713943cc4b09c459 100644 (file)
@@ -120,13 +120,8 @@ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
                if (next == ERR_PTR(-ENODEV))
                        kernfs_seq_stop_active(sf, next);
                return next;
-       } else {
-               /*
-                * The same behavior and code as single_open().  Returns
-                * !NULL if pos is at the beginning; otherwise, NULL.
-                */
-               return NULL + !*ppos;
        }
+       return single_start(sf, ppos);
 }
 
 static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
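The removed branch computed `NULL + !*ppos`, arithmetic on a null pointer, which is undefined behaviour and trips UBSAN; delegating to seq_file's single_start() keeps the "non-NULL iff at the beginning" contract without the UB. A hedged kernel-style sketch of what such a helper boils down to (the kernel's actual definition may differ in detail):

#define SEQ_START_TOKEN ((void *)1)     /* kernel's "at the start" sentinel */

static void *single_start_sketch(struct seq_file *sf, loff_t *ppos)
{
        /* non-NULL exactly when iteration is at the beginning */
        return *ppos == 0 ? SEQ_START_TOKEN : NULL;
}
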
index 077b8761d0993fd858ac1d3ae465ae684d393596..23871b18a4292275a9cf9f6ffde22307f249f6d5 100644 (file)
@@ -656,8 +656,8 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
                rsp->OplockLevel = SMB2_OPLOCK_LEVEL_NONE;
        rsp->Reserved = 0;
        rsp->Reserved2 = 0;
-       rsp->PersistentFid = cpu_to_le64(fp->persistent_id);
-       rsp->VolatileFid = cpu_to_le64(fp->volatile_id);
+       rsp->PersistentFid = fp->persistent_id;
+       rsp->VolatileFid = fp->volatile_id;
 
        inc_rfc1001_len(work->response_buf, 24);
 
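The long run of ksmbd hunks that follows all makes one change: SMB2 persistent/volatile file IDs are opaque 64-bit tokens that the server mints and the client echoes back verbatim, so they are now typed __u64 and never byte-swapped on either side. An illustrative sketch of the opaque-handle convention (names are not ksmbd's):

#include <stdbool.h>
#include <stdint.h>

struct open_file {
        uint64_t volatile_id;           /* minted once, never byte-swapped */
};

static uint64_t grant_handle(struct open_file *fp, uint64_t next_id)
{
        fp->volatile_id = next_id;      /* stored and put on the wire as-is */
        return fp->volatile_id;
}

static bool handle_matches(const struct open_file *fp, uint64_t from_wire)
{
        return fp->volatile_id == from_wire;    /* equality only */
}

Because the token is only ever compared for equality against what the client sent back, a cpu_to_le64()/le64_to_cpu() round trip adds nothing and invites mismatched conversions, which is what the __le64 to __u64 retyping below eliminates.
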
index 2e12f6d8483bda61364e79b5f7ddaf2a7ae1423a..4cd03d661df0b1cde2ad3bb8aaf4bce9ef13770c 100644 (file)
@@ -585,7 +585,7 @@ static int __init ksmbd_server_init(void)
        if (ret)
                goto err_crypto_destroy;
 
-       pr_warn_once("The ksmbd server is experimental, use at your own risk.\n");
+       pr_warn_once("The ksmbd server is experimental\n");
 
        return 0;
 
index 67e8e28e3fc3529648dc99b615fdae012afa3349..3bf6c56c654cfbe806fd0ec634bca17f12bcb717 100644 (file)
@@ -377,12 +377,8 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work)
         * command in the compound request
         */
        if (req->Command == SMB2_CREATE && rsp->Status == STATUS_SUCCESS) {
-               work->compound_fid =
-                       le64_to_cpu(((struct smb2_create_rsp *)rsp)->
-                               VolatileFileId);
-               work->compound_pfid =
-                       le64_to_cpu(((struct smb2_create_rsp *)rsp)->
-                               PersistentFileId);
+               work->compound_fid = ((struct smb2_create_rsp *)rsp)->VolatileFileId;
+               work->compound_pfid = ((struct smb2_create_rsp *)rsp)->PersistentFileId;
                work->compound_sid = le64_to_cpu(rsp->SessionId);
        }
 
@@ -2129,7 +2125,7 @@ static noinline int create_smb2_pipe(struct ksmbd_work *work)
        rsp->EndofFile = cpu_to_le64(0);
        rsp->FileAttributes = FILE_ATTRIBUTE_NORMAL_LE;
        rsp->Reserved2 = 0;
-       rsp->VolatileFileId = cpu_to_le64(id);
+       rsp->VolatileFileId = id;
        rsp->PersistentFileId = 0;
        rsp->CreateContextsOffset = 0;
        rsp->CreateContextsLength = 0;
@@ -3157,8 +3153,8 @@ int smb2_open(struct ksmbd_work *work)
 
        rsp->Reserved2 = 0;
 
-       rsp->PersistentFileId = cpu_to_le64(fp->persistent_id);
-       rsp->VolatileFileId = cpu_to_le64(fp->volatile_id);
+       rsp->PersistentFileId = fp->persistent_id;
+       rsp->VolatileFileId = fp->volatile_id;
 
        rsp->CreateContextsOffset = 0;
        rsp->CreateContextsLength = 0;
@@ -3865,9 +3861,7 @@ int smb2_query_dir(struct ksmbd_work *work)
                goto err_out2;
        }
 
-       dir_fp = ksmbd_lookup_fd_slow(work,
-                                     le64_to_cpu(req->VolatileFileId),
-                                     le64_to_cpu(req->PersistentFileId));
+       dir_fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
        if (!dir_fp) {
                rc = -EBADF;
                goto err_out2;
@@ -4088,12 +4082,12 @@ static int smb2_get_info_file_pipe(struct ksmbd_session *sess,
         * Windows can sometime send query file info request on
         * pipe without opening it, checking error condition here
         */
-       id = le64_to_cpu(req->VolatileFileId);
+       id = req->VolatileFileId;
        if (!ksmbd_session_rpc_method(sess, id))
                return -ENOENT;
 
        ksmbd_debug(SMB, "FileInfoClass %u, FileId 0x%llx\n",
-                   req->FileInfoClass, le64_to_cpu(req->VolatileFileId));
+                   req->FileInfoClass, req->VolatileFileId);
 
        switch (req->FileInfoClass) {
        case FILE_STANDARD_INFORMATION:
@@ -4738,7 +4732,7 @@ static int smb2_get_info_file(struct ksmbd_work *work,
        }
 
        if (work->next_smb2_rcv_hdr_off) {
-               if (!has_file_id(le64_to_cpu(req->VolatileFileId))) {
+               if (!has_file_id(req->VolatileFileId)) {
                        ksmbd_debug(SMB, "Compound request set FID = %llu\n",
                                    work->compound_fid);
                        id = work->compound_fid;
@@ -4747,8 +4741,8 @@ static int smb2_get_info_file(struct ksmbd_work *work,
        }
 
        if (!has_file_id(id)) {
-               id = le64_to_cpu(req->VolatileFileId);
-               pid = le64_to_cpu(req->PersistentFileId);
+               id = req->VolatileFileId;
+               pid = req->PersistentFileId;
        }
 
        fp = ksmbd_lookup_fd_slow(work, id, pid);
@@ -5113,7 +5107,7 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
        }
 
        if (work->next_smb2_rcv_hdr_off) {
-               if (!has_file_id(le64_to_cpu(req->VolatileFileId))) {
+               if (!has_file_id(req->VolatileFileId)) {
                        ksmbd_debug(SMB, "Compound request set FID = %llu\n",
                                    work->compound_fid);
                        id = work->compound_fid;
@@ -5122,8 +5116,8 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
        }
 
        if (!has_file_id(id)) {
-               id = le64_to_cpu(req->VolatileFileId);
-               pid = le64_to_cpu(req->PersistentFileId);
+               id = req->VolatileFileId;
+               pid = req->PersistentFileId;
        }
 
        fp = ksmbd_lookup_fd_slow(work, id, pid);
@@ -5221,7 +5215,7 @@ static noinline int smb2_close_pipe(struct ksmbd_work *work)
        struct smb2_close_req *req = smb2_get_msg(work->request_buf);
        struct smb2_close_rsp *rsp = smb2_get_msg(work->response_buf);
 
-       id = le64_to_cpu(req->VolatileFileId);
+       id = req->VolatileFileId;
        ksmbd_session_rpc_close(work->sess, id);
 
        rsp->StructureSize = cpu_to_le16(60);
@@ -5280,7 +5274,7 @@ int smb2_close(struct ksmbd_work *work)
        }
 
        if (work->next_smb2_rcv_hdr_off &&
-           !has_file_id(le64_to_cpu(req->VolatileFileId))) {
+           !has_file_id(req->VolatileFileId)) {
                if (!has_file_id(work->compound_fid)) {
                        /* file already closed, return FILE_CLOSED */
                        ksmbd_debug(SMB, "file already closed\n");
@@ -5299,7 +5293,7 @@ int smb2_close(struct ksmbd_work *work)
                        work->compound_pfid = KSMBD_NO_FID;
                }
        } else {
-               volatile_id = le64_to_cpu(req->VolatileFileId);
+               volatile_id = req->VolatileFileId;
        }
        ksmbd_debug(SMB, "volatile_id = %llu\n", volatile_id);
 
@@ -5988,7 +5982,7 @@ int smb2_set_info(struct ksmbd_work *work)
        if (work->next_smb2_rcv_hdr_off) {
                req = ksmbd_req_buf_next(work);
                rsp = ksmbd_resp_buf_next(work);
-               if (!has_file_id(le64_to_cpu(req->VolatileFileId))) {
+               if (!has_file_id(req->VolatileFileId)) {
                        ksmbd_debug(SMB, "Compound request set FID = %llu\n",
                                    work->compound_fid);
                        id = work->compound_fid;
@@ -6000,8 +5994,8 @@ int smb2_set_info(struct ksmbd_work *work)
        }
 
        if (!has_file_id(id)) {
-               id = le64_to_cpu(req->VolatileFileId);
-               pid = le64_to_cpu(req->PersistentFileId);
+               id = req->VolatileFileId;
+               pid = req->PersistentFileId;
        }
 
        fp = ksmbd_lookup_fd_slow(work, id, pid);
@@ -6079,7 +6073,7 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
        struct smb2_read_req *req = smb2_get_msg(work->request_buf);
        struct smb2_read_rsp *rsp = smb2_get_msg(work->response_buf);
 
-       id = le64_to_cpu(req->VolatileFileId);
+       id = req->VolatileFileId;
 
        inc_rfc1001_len(work->response_buf, 16);
        rpc_resp = ksmbd_rpc_read(work->sess, id);
@@ -6215,8 +6209,7 @@ int smb2_read(struct ksmbd_work *work)
                        goto out;
        }
 
-       fp = ksmbd_lookup_fd_slow(work, le64_to_cpu(req->VolatileFileId),
-                                 le64_to_cpu(req->PersistentFileId));
+       fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
        if (!fp) {
                err = -ENOENT;
                goto out;
@@ -6335,7 +6328,7 @@ static noinline int smb2_write_pipe(struct ksmbd_work *work)
        size_t length;
 
        length = le32_to_cpu(req->Length);
-       id = le64_to_cpu(req->VolatileFileId);
+       id = req->VolatileFileId;
 
        if (le16_to_cpu(req->DataOffset) ==
            offsetof(struct smb2_write_req, Buffer)) {
@@ -6471,8 +6464,7 @@ int smb2_write(struct ksmbd_work *work)
                goto out;
        }
 
-       fp = ksmbd_lookup_fd_slow(work, le64_to_cpu(req->VolatileFileId),
-                                 le64_to_cpu(req->PersistentFileId));
+       fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
        if (!fp) {
                err = -ENOENT;
                goto out;
@@ -6584,12 +6576,9 @@ int smb2_flush(struct ksmbd_work *work)
 
        WORK_BUFFERS(work, req, rsp);
 
-       ksmbd_debug(SMB, "SMB2_FLUSH called for fid %llu\n",
-                   le64_to_cpu(req->VolatileFileId));
+       ksmbd_debug(SMB, "SMB2_FLUSH called for fid %llu\n", req->VolatileFileId);
 
-       err = ksmbd_vfs_fsync(work,
-                             le64_to_cpu(req->VolatileFileId),
-                             le64_to_cpu(req->PersistentFileId));
+       err = ksmbd_vfs_fsync(work, req->VolatileFileId, req->PersistentFileId);
        if (err)
                goto out;
 
@@ -6618,8 +6607,7 @@ int smb2_cancel(struct ksmbd_work *work)
        struct ksmbd_conn *conn = work->conn;
        struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
        struct smb2_hdr *chdr;
-       struct ksmbd_work *cancel_work = NULL;
-       int canceled = 0;
+       struct ksmbd_work *cancel_work = NULL, *iter;
        struct list_head *command_list;
 
        ksmbd_debug(SMB, "smb2 cancel called on mid %llu, async flags 0x%x\n",
@@ -6629,11 +6617,11 @@ int smb2_cancel(struct ksmbd_work *work)
                command_list = &conn->async_requests;
 
                spin_lock(&conn->request_lock);
-               list_for_each_entry(cancel_work, command_list,
+               list_for_each_entry(iter, command_list,
                                    async_request_entry) {
-                       chdr = smb2_get_msg(cancel_work->request_buf);
+                       chdr = smb2_get_msg(iter->request_buf);
 
-                       if (cancel_work->async_id !=
+                       if (iter->async_id !=
                            le64_to_cpu(hdr->Id.AsyncId))
                                continue;
 
@@ -6641,7 +6629,7 @@ int smb2_cancel(struct ksmbd_work *work)
                                    "smb2 with AsyncId %llu cancelled command = 0x%x\n",
                                    le64_to_cpu(hdr->Id.AsyncId),
                                    le16_to_cpu(chdr->Command));
-                       canceled = 1;
+                       cancel_work = iter;
                        break;
                }
                spin_unlock(&conn->request_lock);
@@ -6649,24 +6637,24 @@ int smb2_cancel(struct ksmbd_work *work)
                command_list = &conn->requests;
 
                spin_lock(&conn->request_lock);
-               list_for_each_entry(cancel_work, command_list, request_entry) {
-                       chdr = smb2_get_msg(cancel_work->request_buf);
+               list_for_each_entry(iter, command_list, request_entry) {
+                       chdr = smb2_get_msg(iter->request_buf);
 
                        if (chdr->MessageId != hdr->MessageId ||
-                           cancel_work == work)
+                           iter == work)
                                continue;
 
                        ksmbd_debug(SMB,
                                    "smb2 with mid %llu cancelled command = 0x%x\n",
                                    le64_to_cpu(hdr->MessageId),
                                    le16_to_cpu(chdr->Command));
-                       canceled = 1;
+                       cancel_work = iter;
                        break;
                }
                spin_unlock(&conn->request_lock);
        }
 
-       if (canceled) {
+       if (cancel_work) {
                cancel_work->state = KSMBD_WORK_CANCELLED;
                if (cancel_work->cancel_fn)
                        cancel_work->cancel_fn(cancel_work->cancel_argv);
@@ -6804,12 +6792,9 @@ int smb2_lock(struct ksmbd_work *work)
        int prior_lock = 0;
 
        ksmbd_debug(SMB, "Received lock request\n");
-       fp = ksmbd_lookup_fd_slow(work,
-                                 le64_to_cpu(req->VolatileFileId),
-                                 le64_to_cpu(req->PersistentFileId));
+       fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
        if (!fp) {
-               ksmbd_debug(SMB, "Invalid file id for lock : %llu\n",
-                           le64_to_cpu(req->VolatileFileId));
+               ksmbd_debug(SMB, "Invalid file id for lock : %llu\n", req->VolatileFileId);
                err = -ENOENT;
                goto out2;
        }
@@ -7164,8 +7149,8 @@ static int fsctl_copychunk(struct ksmbd_work *work,
 
        ci_rsp = (struct copychunk_ioctl_rsp *)&rsp->Buffer[0];
 
-       rsp->VolatileFileId = cpu_to_le64(volatile_id);
-       rsp->PersistentFileId = cpu_to_le64(persistent_id);
+       rsp->VolatileFileId = volatile_id;
+       rsp->PersistentFileId = persistent_id;
        ci_rsp->ChunksWritten =
                cpu_to_le32(ksmbd_server_side_copy_max_chunk_count());
        ci_rsp->ChunkBytesWritten =
@@ -7379,8 +7364,8 @@ ipv6_retry:
        if (nii_rsp)
                nii_rsp->Next = 0;
 
-       rsp->PersistentFileId = cpu_to_le64(SMB2_NO_FID);
-       rsp->VolatileFileId = cpu_to_le64(SMB2_NO_FID);
+       rsp->PersistentFileId = SMB2_NO_FID;
+       rsp->VolatileFileId = SMB2_NO_FID;
        return nbytes;
 }
 
@@ -7547,9 +7532,7 @@ static int fsctl_request_resume_key(struct ksmbd_work *work,
 {
        struct ksmbd_file *fp;
 
-       fp = ksmbd_lookup_fd_slow(work,
-                                 le64_to_cpu(req->VolatileFileId),
-                                 le64_to_cpu(req->PersistentFileId));
+       fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
        if (!fp)
                return -ENOENT;
 
@@ -7579,7 +7562,7 @@ int smb2_ioctl(struct ksmbd_work *work)
        if (work->next_smb2_rcv_hdr_off) {
                req = ksmbd_req_buf_next(work);
                rsp = ksmbd_resp_buf_next(work);
-               if (!has_file_id(le64_to_cpu(req->VolatileFileId))) {
+               if (!has_file_id(req->VolatileFileId)) {
                        ksmbd_debug(SMB, "Compound request set FID = %llu\n",
                                    work->compound_fid);
                        id = work->compound_fid;
@@ -7590,14 +7573,14 @@ int smb2_ioctl(struct ksmbd_work *work)
        }
 
        if (!has_file_id(id))
-               id = le64_to_cpu(req->VolatileFileId);
+               id = req->VolatileFileId;
 
        if (req->Flags != cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL)) {
                rsp->hdr.Status = STATUS_NOT_SUPPORTED;
                goto out;
        }
 
-       cnt_code = le32_to_cpu(req->CntCode);
+       cnt_code = le32_to_cpu(req->CtlCode);
        ret = smb2_calc_max_out_buf_len(work, 48,
                                        le32_to_cpu(req->MaxOutputResponse));
        if (ret < 0) {
@@ -7656,8 +7639,8 @@ int smb2_ioctl(struct ksmbd_work *work)
                        goto out;
 
                nbytes = sizeof(struct validate_negotiate_info_rsp);
-               rsp->PersistentFileId = cpu_to_le64(SMB2_NO_FID);
-               rsp->VolatileFileId = cpu_to_le64(SMB2_NO_FID);
+               rsp->PersistentFileId = SMB2_NO_FID;
+               rsp->VolatileFileId = SMB2_NO_FID;
                break;
        case FSCTL_QUERY_NETWORK_INTERFACE_INFO:
                ret = fsctl_query_iface_info_ioctl(conn, rsp, out_buf_len);
@@ -7703,10 +7686,10 @@ int smb2_ioctl(struct ksmbd_work *work)
                rsp->PersistentFileId = req->PersistentFileId;
                fsctl_copychunk(work,
                                (struct copychunk_ioctl_req *)&req->Buffer[0],
-                               le32_to_cpu(req->CntCode),
+                               le32_to_cpu(req->CtlCode),
                                le32_to_cpu(req->InputCount),
-                               le64_to_cpu(req->VolatileFileId),
-                               le64_to_cpu(req->PersistentFileId),
+                               req->VolatileFileId,
+                               req->PersistentFileId,
                                rsp);
                break;
        case FSCTL_SET_SPARSE:
@@ -7857,7 +7840,7 @@ dup_ext_out:
                goto out;
        }
 
-       rsp->CntCode = cpu_to_le32(cnt_code);
+       rsp->CtlCode = cpu_to_le32(cnt_code);
        rsp->InputCount = cpu_to_le32(0);
        rsp->InputOffset = cpu_to_le32(112);
        rsp->OutputOffset = cpu_to_le32(112);
@@ -7903,8 +7886,8 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
        char req_oplevel = 0, rsp_oplevel = 0;
        unsigned int oplock_change_type;
 
-       volatile_id = le64_to_cpu(req->VolatileFid);
-       persistent_id = le64_to_cpu(req->PersistentFid);
+       volatile_id = req->VolatileFid;
+       persistent_id = req->PersistentFid;
        req_oplevel = req->OplockLevel;
        ksmbd_debug(OPLOCK, "v_id %llu, p_id %llu request oplock level %d\n",
                    volatile_id, persistent_id, req_oplevel);
@@ -7999,8 +7982,8 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
        rsp->OplockLevel = rsp_oplevel;
        rsp->Reserved = 0;
        rsp->Reserved2 = 0;
-       rsp->VolatileFid = cpu_to_le64(volatile_id);
-       rsp->PersistentFid = cpu_to_le64(persistent_id);
+       rsp->VolatileFid = volatile_id;
+       rsp->PersistentFid = persistent_id;
        inc_rfc1001_len(work->response_buf, 24);
        return;
 
@@ -8500,7 +8483,7 @@ static void fill_transform_hdr(void *tr_buf, char *old_buf, __le16 cipher_type)
        struct smb2_hdr *hdr = smb2_get_msg(old_buf);
        unsigned int orig_len = get_rfc1002_len(old_buf);
 
-       memset(tr_buf, 0, sizeof(struct smb2_transform_hdr) + 4);
+       /* tr_buf must be cleared by the caller */
        tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
        tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
        tr_hdr->Flags = cpu_to_le16(TRANSFORM_FLAG_ENCRYPTED);
index d494684265761e92a506c5f7ab60dd0c21cb20bf..af455278d00591fcd9673b245924b536f266b794 100644 (file)
 #define FILE_CREATED           0x00000002
 #define FILE_OVERWRITTEN       0x00000003
 
-/*
- * Size of the session key (crypto key encrypted with the password
- */
-#define SMB2_NTLMV2_SESSKEY_SIZE       16
-#define SMB2_SIGNATURE_SIZE            16
-#define SMB2_HMACSHA256_SIZE           32
-#define SMB2_CMACAES_SIZE              16
-#define SMB3_GCM128_CRYPTKEY_SIZE      16
-#define SMB3_GCM256_CRYPTKEY_SIZE      32
-
-/*
- * Size of the smb3 encryption/decryption keys
- */
-#define SMB3_ENC_DEC_KEY_SIZE          32
-
-/*
- * Size of the smb3 signing key
- */
-#define SMB3_SIGN_KEY_SIZE             16
-
-#define CIFS_CLIENT_CHALLENGE_SIZE     8
-#define SMB_SERVER_CHALLENGE_SIZE      8
-
 /* SMB2 Max Credits */
 #define SMB2_MAX_CREDITS               8192
 
-/* Maximum buffer size value we can send with 1 credit */
-#define SMB2_MAX_BUFFER_SIZE 65536
-
-#define NUMBER_OF_SMB2_COMMANDS        0x0013
-
 /* BB FIXME - analyze following length BB */
 #define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
 
 #define SMB21_DEFAULT_IOSIZE   (1024 * 1024)
-#define SMB3_DEFAULT_IOSIZE    (4 * 1024 * 1024)
 #define SMB3_DEFAULT_TRANS_SIZE        (1024 * 1024)
 #define SMB3_MIN_IOSIZE        (64 * 1024)
 #define SMB3_MAX_IOSIZE        (8 * 1024 * 1024)
  *
  */
 
-#define SMB2_ERROR_STRUCTURE_SIZE2     9
-#define SMB2_ERROR_STRUCTURE_SIZE2_LE  cpu_to_le16(SMB2_ERROR_STRUCTURE_SIZE2)
-
-struct smb2_err_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize;
-       __u8   ErrorContextCount;
-       __u8   Reserved;
-       __le32 ByteCount;  /* even if zero, at least one byte follows */
-       __u8   ErrorData[1];  /* variable length */
-} __packed;
-
 struct preauth_integrity_info {
        /* PreAuth integrity Hash ID */
        __le16                  Preauth_HashId;
@@ -116,8 +75,8 @@ struct create_durable_reconn_req {
        union {
                __u8  Reserved[16];
                struct {
-                       __le64 PersistentFileId;
-                       __le64 VolatileFileId;
+                       __u64 PersistentFileId;
+                       __u64 VolatileFileId;
                } Fid;
        } Data;
 } __packed;
@@ -126,8 +85,8 @@ struct create_durable_reconn_v2_req {
        struct create_context ccontext;
        __u8   Name[8];
        struct {
-               __le64 PersistentFileId;
-               __le64 VolatileFileId;
+               __u64 PersistentFileId;
+               __u64 VolatileFileId;
        } Fid;
        __u8 CreateGuid[16];
        __le32 Flags;
@@ -161,13 +120,6 @@ struct create_alloc_size_req {
        __le64 AllocationSize;
 } __packed;
 
-struct create_posix {
-       struct create_context ccontext;
-       __u8    Name[16];
-       __le32  Mode;
-       __u32   Reserved;
-} __packed;
-
 struct create_durable_rsp {
        struct create_context ccontext;
        __u8   Name[8];
@@ -209,45 +161,6 @@ struct create_posix_rsp {
        u8 SidBuffer[40];
 } __packed;
 
-#define SMB2_LEASE_NONE_LE                     cpu_to_le32(0x00)
-#define SMB2_LEASE_READ_CACHING_LE             cpu_to_le32(0x01)
-#define SMB2_LEASE_HANDLE_CACHING_LE           cpu_to_le32(0x02)
-#define SMB2_LEASE_WRITE_CACHING_LE            cpu_to_le32(0x04)
-
-#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE   cpu_to_le32(0x02)
-
-#define SMB2_LEASE_KEY_SIZE                    16
-
-struct lease_context {
-       __u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
-       __le32 LeaseState;
-       __le32 LeaseFlags;
-       __le64 LeaseDuration;
-} __packed;
-
-struct lease_context_v2 {
-       __u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
-       __le32 LeaseState;
-       __le32 LeaseFlags;
-       __le64 LeaseDuration;
-       __u8 ParentLeaseKey[SMB2_LEASE_KEY_SIZE];
-       __le16 Epoch;
-       __le16 Reserved;
-} __packed;
-
-struct create_lease {
-       struct create_context ccontext;
-       __u8   Name[8];
-       struct lease_context lcontext;
-} __packed;
-
-struct create_lease_v2 {
-       struct create_context ccontext;
-       __u8   Name[8];
-       struct lease_context_v2 lcontext;
-       __u8   Pad[4];
-} __packed;
-
 struct smb2_buffer_desc_v1 {
        __le64 offset;
        __le32 token;
@@ -256,63 +169,6 @@ struct smb2_buffer_desc_v1 {
 
 #define SMB2_0_IOCTL_IS_FSCTL 0x00000001
 
-struct duplicate_extents_to_file {
-       __u64 PersistentFileHandle; /* source file handle, opaque endianness */
-       __u64 VolatileFileHandle;
-       __le64 SourceFileOffset;
-       __le64 TargetFileOffset;
-       __le64 ByteCount;  /* Bytes to be copied */
-} __packed;
-
-struct smb2_ioctl_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 57 */
-       __le16 Reserved; /* offset from start of SMB2 header to write data */
-       __le32 CntCode;
-       __le64  PersistentFileId;
-       __le64  VolatileFileId;
-       __le32 InputOffset; /* Reserved MBZ */
-       __le32 InputCount;
-       __le32 MaxInputResponse;
-       __le32 OutputOffset;
-       __le32 OutputCount;
-       __le32 MaxOutputResponse;
-       __le32 Flags;
-       __le32 Reserved2;
-       __u8   Buffer[1];
-} __packed;
-
-struct smb2_ioctl_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 49 */
-       __le16 Reserved; /* offset from start of SMB2 header to write data */
-       __le32 CntCode;
-       __le64  PersistentFileId;
-       __le64  VolatileFileId;
-       __le32 InputOffset; /* Reserved MBZ */
-       __le32 InputCount;
-       __le32 OutputOffset;
-       __le32 OutputCount;
-       __le32 Flags;
-       __le32 Reserved2;
-       __u8   Buffer[1];
-} __packed;
-
-struct validate_negotiate_info_req {
-       __le32 Capabilities;
-       __u8   Guid[SMB2_CLIENT_GUID_SIZE];
-       __le16 SecurityMode;
-       __le16 DialectCount;
-       __le16 Dialects[1]; /* dialect (someday maybe list) client asked for */
-} __packed;
-
-struct validate_negotiate_info_rsp {
-       __le32 Capabilities;
-       __u8   Guid[SMB2_CLIENT_GUID_SIZE];
-       __le16 SecurityMode;
-       __le16 Dialect; /* Dialect in use for the connection */
-} __packed;
-
 struct smb_sockaddr_in {
        __be16 Port;
        __be32 IPv4address;
@@ -357,7 +213,7 @@ struct file_object_buf_type1_ioctl_rsp {
 } __packed;
 
 struct resume_key_ioctl_rsp {
-       __le64 ResumeKey[3];
+       __u64 ResumeKey[3];
        __le32 ContextLength;
        __u8 Context[4]; /* ignored, Windows sets to 4 bytes of zero */
 } __packed;
@@ -386,167 +242,6 @@ struct file_sparse {
        __u8    SetSparse;
 } __packed;
 
-struct file_zero_data_information {
-       __le64  FileOffset;
-       __le64  BeyondFinalZero;
-} __packed;
-
-struct file_allocated_range_buffer {
-       __le64  file_offset;
-       __le64  length;
-} __packed;
-
-struct reparse_data_buffer {
-       __le32  ReparseTag;
-       __le16  ReparseDataLength;
-       __u16   Reserved;
-       __u8    DataBuffer[]; /* Variable Length */
-} __packed;
-
-/* SMB2 Notify Action Flags */
-#define FILE_ACTION_ADDED              0x00000001
-#define FILE_ACTION_REMOVED            0x00000002
-#define FILE_ACTION_MODIFIED           0x00000003
-#define FILE_ACTION_RENAMED_OLD_NAME   0x00000004
-#define FILE_ACTION_RENAMED_NEW_NAME   0x00000005
-#define FILE_ACTION_ADDED_STREAM       0x00000006
-#define FILE_ACTION_REMOVED_STREAM     0x00000007
-#define FILE_ACTION_MODIFIED_STREAM    0x00000008
-#define FILE_ACTION_REMOVED_BY_DELETE  0x00000009
-
-#define SMB2_LOCKFLAG_SHARED           0x0001
-#define SMB2_LOCKFLAG_EXCLUSIVE                0x0002
-#define SMB2_LOCKFLAG_UNLOCK           0x0004
-#define SMB2_LOCKFLAG_FAIL_IMMEDIATELY 0x0010
-#define SMB2_LOCKFLAG_MASK             0x0007
-
-struct smb2_lock_element {
-       __le64 Offset;
-       __le64 Length;
-       __le32 Flags;
-       __le32 Reserved;
-} __packed;
-
-struct smb2_lock_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 48 */
-       __le16 LockCount;
-       __le32 Reserved;
-       __le64  PersistentFileId;
-       __le64  VolatileFileId;
-       /* Followed by at least one */
-       struct smb2_lock_element locks[1];
-} __packed;
-
-struct smb2_lock_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 4 */
-       __le16 Reserved;
-} __packed;
-
-struct smb2_echo_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize;   /* Must be 4 */
-       __u16  Reserved;
-} __packed;
-
-struct smb2_echo_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize;   /* Must be 4 */
-       __u16  Reserved;
-} __packed;
-
-/* search (query_directory) Flags field */
-#define SMB2_RESTART_SCANS             0x01
-#define SMB2_RETURN_SINGLE_ENTRY       0x02
-#define SMB2_INDEX_SPECIFIED           0x04
-#define SMB2_REOPEN                    0x10
-
-struct smb2_query_directory_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 33 */
-       __u8   FileInformationClass;
-       __u8   Flags;
-       __le32 FileIndex;
-       __le64  PersistentFileId;
-       __le64  VolatileFileId;
-       __le16 FileNameOffset;
-       __le16 FileNameLength;
-       __le32 OutputBufferLength;
-       __u8   Buffer[1];
-} __packed;
-
-struct smb2_query_directory_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 9 */
-       __le16 OutputBufferOffset;
-       __le32 OutputBufferLength;
-       __u8   Buffer[1];
-} __packed;
-
-/* Possible InfoType values */
-#define SMB2_O_INFO_FILE       0x01
-#define SMB2_O_INFO_FILESYSTEM 0x02
-#define SMB2_O_INFO_SECURITY   0x03
-#define SMB2_O_INFO_QUOTA      0x04
-
-/* Security info type additionalinfo flags. See MS-SMB2 (2.2.37) or MS-DTYP */
-#define OWNER_SECINFO   0x00000001
-#define GROUP_SECINFO   0x00000002
-#define DACL_SECINFO   0x00000004
-#define SACL_SECINFO   0x00000008
-#define LABEL_SECINFO   0x00000010
-#define ATTRIBUTE_SECINFO   0x00000020
-#define SCOPE_SECINFO   0x00000040
-#define BACKUP_SECINFO   0x00010000
-#define UNPROTECTED_SACL_SECINFO   0x10000000
-#define UNPROTECTED_DACL_SECINFO   0x20000000
-#define PROTECTED_SACL_SECINFO   0x40000000
-#define PROTECTED_DACL_SECINFO   0x80000000
-
-struct smb2_query_info_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 41 */
-       __u8   InfoType;
-       __u8   FileInfoClass;
-       __le32 OutputBufferLength;
-       __le16 InputBufferOffset;
-       __u16  Reserved;
-       __le32 InputBufferLength;
-       __le32 AdditionalInformation;
-       __le32 Flags;
-       __le64  PersistentFileId;
-       __le64  VolatileFileId;
-       __u8   Buffer[1];
-} __packed;
-
-struct smb2_query_info_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 9 */
-       __le16 OutputBufferOffset;
-       __le32 OutputBufferLength;
-       __u8   Buffer[1];
-} __packed;
-
-struct smb2_set_info_req {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 33 */
-       __u8   InfoType;
-       __u8   FileInfoClass;
-       __le32 BufferLength;
-       __le16 BufferOffset;
-       __u16  Reserved;
-       __le32 AdditionalInformation;
-       __le64  PersistentFileId;
-       __le64  VolatileFileId;
-       __u8   Buffer[1];
-} __packed;
-
-struct smb2_set_info_rsp {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 2 */
-} __packed;
-
 /* FILE Info response size */
 #define FILE_DIRECTORY_INFORMATION_SIZE       1
 #define FILE_FULL_DIRECTORY_INFORMATION_SIZE  2
@@ -602,145 +297,11 @@ struct fs_type_info {
        long            magic_number;
 } __packed;
 
-struct smb2_oplock_break {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 24 */
-       __u8   OplockLevel;
-       __u8   Reserved;
-       __le32 Reserved2;
-       __le64  PersistentFid;
-       __le64  VolatileFid;
-} __packed;
-
-#define SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED cpu_to_le32(0x01)
-
-struct smb2_lease_break {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 44 */
-       __le16 Epoch;
-       __le32 Flags;
-       __u8   LeaseKey[16];
-       __le32 CurrentLeaseState;
-       __le32 NewLeaseState;
-       __le32 BreakReason;
-       __le32 AccessMaskHint;
-       __le32 ShareMaskHint;
-} __packed;
-
-struct smb2_lease_ack {
-       struct smb2_hdr hdr;
-       __le16 StructureSize; /* Must be 36 */
-       __le16 Reserved;
-       __le32 Flags;
-       __u8   LeaseKey[16];
-       __le32 LeaseState;
-       __le64 LeaseDuration;
-} __packed;
-
 /*
- *     PDU infolevel structure definitions
+ *     PDU query infolevel structure definitions
  *     BB consider moving to a different header
  */
 
-/* File System Information Classes */
-#define FS_VOLUME_INFORMATION          1 /* Query */
-#define FS_LABEL_INFORMATION           2 /* Set */
-#define FS_SIZE_INFORMATION            3 /* Query */
-#define FS_DEVICE_INFORMATION          4 /* Query */
-#define FS_ATTRIBUTE_INFORMATION       5 /* Query */
-#define FS_CONTROL_INFORMATION         6 /* Query, Set */
-#define FS_FULL_SIZE_INFORMATION       7 /* Query */
-#define FS_OBJECT_ID_INFORMATION       8 /* Query, Set */
-#define FS_DRIVER_PATH_INFORMATION     9 /* Query */
-#define FS_SECTOR_SIZE_INFORMATION     11 /* SMB3 or later. Query */
-#define FS_POSIX_INFORMATION           100 /* SMB3.1.1 POSIX. Query */
-
-struct smb2_fs_full_size_info {
-       __le64 TotalAllocationUnits;
-       __le64 CallerAvailableAllocationUnits;
-       __le64 ActualAvailableAllocationUnits;
-       __le32 SectorsPerAllocationUnit;
-       __le32 BytesPerSector;
-} __packed;
-
-#define SSINFO_FLAGS_ALIGNED_DEVICE            0x00000001
-#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002
-#define SSINFO_FLAGS_NO_SEEK_PENALTY           0x00000004
-#define SSINFO_FLAGS_TRIM_ENABLED              0x00000008
-
-/* sector size info struct */
-struct smb3_fs_ss_info {
-       __le32 LogicalBytesPerSector;
-       __le32 PhysicalBytesPerSectorForAtomicity;
-       __le32 PhysicalBytesPerSectorForPerf;
-       __le32 FSEffPhysicalBytesPerSectorForAtomicity;
-       __le32 Flags;
-       __le32 ByteOffsetForSectorAlignment;
-       __le32 ByteOffsetForPartitionAlignment;
-} __packed;
-
-/* File System Control Information */
-struct smb2_fs_control_info {
-       __le64 FreeSpaceStartFiltering;
-       __le64 FreeSpaceThreshold;
-       __le64 FreeSpaceStopFiltering;
-       __le64 DefaultQuotaThreshold;
-       __le64 DefaultQuotaLimit;
-       __le32 FileSystemControlFlags;
-       __le32 Padding;
-} __packed;
-
-/* partial list of QUERY INFO levels */
-#define FILE_DIRECTORY_INFORMATION     1
-#define FILE_FULL_DIRECTORY_INFORMATION 2
-#define FILE_BOTH_DIRECTORY_INFORMATION 3
-#define FILE_BASIC_INFORMATION         4
-#define FILE_STANDARD_INFORMATION      5
-#define FILE_INTERNAL_INFORMATION      6
-#define FILE_EA_INFORMATION            7
-#define FILE_ACCESS_INFORMATION                8
-#define FILE_NAME_INFORMATION          9
-#define FILE_RENAME_INFORMATION                10
-#define FILE_LINK_INFORMATION          11
-#define FILE_NAMES_INFORMATION         12
-#define FILE_DISPOSITION_INFORMATION   13
-#define FILE_POSITION_INFORMATION      14
-#define FILE_FULL_EA_INFORMATION       15
-#define FILE_MODE_INFORMATION          16
-#define FILE_ALIGNMENT_INFORMATION     17
-#define FILE_ALL_INFORMATION           18
-#define FILE_ALLOCATION_INFORMATION    19
-#define FILE_END_OF_FILE_INFORMATION   20
-#define FILE_ALTERNATE_NAME_INFORMATION 21
-#define FILE_STREAM_INFORMATION                22
-#define FILE_PIPE_INFORMATION          23
-#define FILE_PIPE_LOCAL_INFORMATION    24
-#define FILE_PIPE_REMOTE_INFORMATION   25
-#define FILE_MAILSLOT_QUERY_INFORMATION 26
-#define FILE_MAILSLOT_SET_INFORMATION  27
-#define FILE_COMPRESSION_INFORMATION   28
-#define FILE_OBJECT_ID_INFORMATION     29
-/* Number 30 not defined in documents */
-#define FILE_MOVE_CLUSTER_INFORMATION  31
-#define FILE_QUOTA_INFORMATION         32
-#define FILE_REPARSE_POINT_INFORMATION 33
-#define FILE_NETWORK_OPEN_INFORMATION  34
-#define FILE_ATTRIBUTE_TAG_INFORMATION 35
-#define FILE_TRACKING_INFORMATION      36
-#define FILEID_BOTH_DIRECTORY_INFORMATION 37
-#define FILEID_FULL_DIRECTORY_INFORMATION 38
-#define FILE_VALID_DATA_LENGTH_INFORMATION 39
-#define FILE_SHORT_NAME_INFORMATION    40
-#define FILE_SFIO_RESERVE_INFORMATION  44
-#define FILE_SFIO_VOLUME_INFORMATION   45
-#define FILE_HARD_LINK_INFORMATION     46
-#define FILE_NORMALIZED_NAME_INFORMATION 48
-#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50
-#define FILE_STANDARD_LINK_INFORMATION 54
-
-#define OP_BREAK_STRUCT_SIZE_20                24
-#define OP_BREAK_STRUCT_SIZE_21                36
-
 struct smb2_file_access_info {
        __le32 AccessFlags;
 } __packed;
@@ -749,56 +310,6 @@ struct smb2_file_alignment_info {
        __le32 AlignmentRequirement;
 } __packed;
 
-struct smb2_file_internal_info {
-       __le64 IndexNumber;
-} __packed; /* level 6 Query */
-
-struct smb2_file_rename_info { /* encoding of request for level 10 */
-       __u8   ReplaceIfExists; /* 1 = replace existing target with new */
-                               /* 0 = fail if target already exists */
-       __u8   Reserved[7];
-       __u64  RootDirectory;  /* MBZ for network operations (why says spec?) */
-       __le32 FileNameLength;
-       char   FileName[];     /* New name to be assigned */
-} __packed; /* level 10 Set */
-
-struct smb2_file_link_info { /* encoding of request for level 11 */
-       __u8   ReplaceIfExists; /* 1 = replace existing link with new */
-                               /* 0 = fail if link already exists */
-       __u8   Reserved[7];
-       __u64  RootDirectory;  /* MBZ for network operations (why says spec?) */
-       __le32 FileNameLength;
-       char   FileName[];     /* Name to be assigned to new link */
-} __packed; /* level 11 Set */
-
-/*
- * This level 18, although with struct with same name is different from cifs
- * level 0x107. Level 0x107 has an extra u64 between AccessFlags and
- * CurrentByteOffset.
- */
-struct smb2_file_all_info { /* data block encoding of response to level 18 */
-       __le64 CreationTime;    /* Beginning of FILE_BASIC_INFO equivalent */
-       __le64 LastAccessTime;
-       __le64 LastWriteTime;
-       __le64 ChangeTime;
-       __le32 Attributes;
-       __u32  Pad1;            /* End of FILE_BASIC_INFO_INFO equivalent */
-       __le64 AllocationSize;  /* Beginning of FILE_STANDARD_INFO equivalent */
-       __le64 EndOfFile;       /* size ie offset to first free byte in file */
-       __le32 NumberOfLinks;   /* hard links */
-       __u8   DeletePending;
-       __u8   Directory;
-       __u16  Pad2;            /* End of FILE_STANDARD_INFO equivalent */
-       __le64 IndexNumber;
-       __le32 EASize;
-       __le32 AccessFlags;
-       __le64 CurrentByteOffset;
-       __le32 Mode;
-       __le32 AlignmentRequirement;
-       __le32 FileNameLength;
-       char   FileName[1];
-} __packed; /* level 18 Query */
-
 struct smb2_file_basic_info { /* data block encoding of response to level 4 */
        __le64 CreationTime;    /* Beginning of FILE_BASIC_INFO equivalent */
        __le64 LastAccessTime;
@@ -821,10 +332,6 @@ struct smb2_file_stream_info {
        char   StreamName[];
 } __packed;
 
-struct smb2_file_eof_info { /* encoding of request for level 10 */
-       __le64 EndOfFile; /* new end of file value */
-} __packed; /* level 20 Set */
-
 struct smb2_file_ntwrk_info {
        __le64 CreationTime;
        __le64 LastAccessTime;
@@ -915,34 +422,6 @@ struct create_sd_buf_req {
        struct smb_ntsd ntsd;
 } __packed;
 
-/* Find File infolevels */
-#define SMB_FIND_FILE_POSIX_INFO       0x064
-
-/* Level 100 query info */
-struct smb311_posix_qinfo {
-       __le64 CreationTime;
-       __le64 LastAccessTime;
-       __le64 LastWriteTime;
-       __le64 ChangeTime;
-       __le64 EndOfFile;
-       __le64 AllocationSize;
-       __le32 DosAttributes;
-       __le64 Inode;
-       __le32 DeviceId;
-       __le32 Zero;
-       /* beginning of POSIX Create Context Response */
-       __le32 HardLinks;
-       __le32 ReparseTag;
-       __le32 Mode;
-       u8     Sids[];
-       /*
-        * var sized owner SID
-        * var sized group SID
-        * le32 filenamelength
-        * u8  filename[]
-        */
-} __packed;
-
 struct smb2_posix_info {
        __le32 NextEntryOffset;
        __u32 Ignored;
index 82a1429bbe127ee562172b08111601ade2050a32..8fef9de787d34f7e64a235c9096645214290a7b3 100644 (file)
@@ -476,7 +476,7 @@ static int ksmbd_netdev_event(struct notifier_block *nb, unsigned long event,
 
        switch (event) {
        case NETDEV_UP:
-               if (netdev->priv_flags & IFF_BRIDGE_PORT)
+               if (netif_is_bridge_port(netdev))
                        return NOTIFY_OK;
 
                list_for_each_entry(iface, &iface_list, entry) {
@@ -585,7 +585,7 @@ int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz)
 
                rtnl_lock();
                for_each_netdev(&init_net, netdev) {
-                       if (netdev->priv_flags & IFF_BRIDGE_PORT)
+                       if (netif_is_bridge_port(netdev))
                                continue;
                        if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL)))
                                return -ENOMEM;
index 3f1829b3ab5b7cda8e2a48071efd6ac23cff4bac..509657fdf4f56dd23ebd9497fbe9073fe1904608 100644 (file)
@@ -3673,18 +3673,14 @@ static struct dentry *filename_create(int dfd, struct filename *name,
 {
        struct dentry *dentry = ERR_PTR(-EEXIST);
        struct qstr last;
+       bool want_dir = lookup_flags & LOOKUP_DIRECTORY;
+       unsigned int reval_flag = lookup_flags & LOOKUP_REVAL;
+       unsigned int create_flags = LOOKUP_CREATE | LOOKUP_EXCL;
        int type;
        int err2;
        int error;
-       bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);
 
-       /*
-        * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
-        * other flags passed in are ignored!
-        */
-       lookup_flags &= LOOKUP_REVAL;
-
-       error = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
+       error = filename_parentat(dfd, name, reval_flag, path, &last, &type);
        if (error)
                return ERR_PTR(error);
 
@@ -3698,11 +3694,13 @@ static struct dentry *filename_create(int dfd, struct filename *name,
        /* don't fail immediately if it's r/o, at least try to report other errors */
        err2 = mnt_want_write(path->mnt);
        /*
-        * Do the final lookup.
+        * Do the final lookup.  Suppress 'create' if there is a trailing
+        * '/', and a directory wasn't requested.
         */
-       lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL;
+       if (last.name[last.len] && !want_dir)
+               create_flags = 0;
        inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
-       dentry = __lookup_hash(&last, path->dentry, lookup_flags);
+       dentry = __lookup_hash(&last, path->dentry, reval_flag | create_flags);
        if (IS_ERR(dentry))
                goto unlock;
 
@@ -3716,7 +3714,7 @@ static struct dentry *filename_create(int dfd, struct filename *name,
         * all is fine. Let's be bastards - you had / on the end, you've
         * been asking for (non-existent) directory. -ENOENT for you.
         */
-       if (unlikely(!is_dir && last.name[last.len])) {
+       if (unlikely(!create_flags)) {
                error = -ENOENT;
                goto fail;
        }
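
The rewritten final-lookup path above folds the trailing-'/' test into create_flags, so __lookup_hash() only sees LOOKUP_CREATE | LOOKUP_EXCL when creation is actually permitted. For illustration, a minimal sketch of the detection itself, assuming a qstr-like {name, len} pair whose name points into the full path string (as `last` does above); the helper name is invented:

	/* Sketch: last.name[last.len] is the byte just past the final path
	 * component; it is '\0' when the path ends there and '/' when the
	 * caller supplied a trailing slash.
	 */
	static bool has_trailing_slash(const struct qstr *last)
	{
		return last->name[last->len] != '\0';
	}
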
index 6e9844b8c6fb46295a3e09edfbb6fdebdaafe7b0..a0a36bfa3aa0543b75da0c6784dbacb50f53d805 100644 (file)
@@ -2112,22 +2112,23 @@ static int invent_group_ids(struct mount *mnt, bool recurse)
 int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
 {
        unsigned int max = READ_ONCE(sysctl_mount_max);
-       unsigned int mounts = 0, old, pending, sum;
+       unsigned int mounts = 0;
        struct mount *p;
 
+       if (ns->mounts >= max)
+               return -ENOSPC;
+       max -= ns->mounts;
+       if (ns->pending_mounts >= max)
+               return -ENOSPC;
+       max -= ns->pending_mounts;
+
        for (p = mnt; p; p = next_mnt(p, mnt))
                mounts++;
 
-       old = ns->mounts;
-       pending = ns->pending_mounts;
-       sum = old + pending;
-       if ((old > sum) ||
-           (pending > sum) ||
-           (max < sum) ||
-           (mounts > (max - sum)))
+       if (mounts > max)
                return -ENOSPC;
 
-       ns->pending_mounts = pending + mounts;
+       ns->pending_mounts += mounts;
        return 0;
 }
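
The new count_mounts() arithmetic deserves a note: instead of the old over/underflow checks on old + pending, it whittles max down stepwise, guarding each subtraction with a >= test first, so the unsigned values can never wrap. The same pattern in isolation (a sketch; all names are invented for illustration):

	/* Sketch: overflow-safe "used + pending + incoming <= limit?".
	 * Each guard precedes its subtraction, so max never wraps.
	 */
	static bool fits_under_limit(unsigned int max, unsigned int used,
				     unsigned int pending, unsigned int incoming)
	{
		if (used >= max)
			return false;
		max -= used;
		if (pending >= max)
			return false;
		max -= pending;
		return incoming <= max;
	}
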
 
@@ -2921,7 +2922,7 @@ static int do_move_mount_old(struct path *path, const char *old_name)
  * add a mount into a namespace's mount tree
  */
 static int do_add_mount(struct mount *newmnt, struct mountpoint *mp,
-                       struct path *path, int mnt_flags)
+                       const struct path *path, int mnt_flags)
 {
        struct mount *parent = real_mount(path->mnt);
 
@@ -3044,7 +3045,7 @@ static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
        return err;
 }
 
-int finish_automount(struct vfsmount *m, struct path *path)
+int finish_automount(struct vfsmount *m, const struct path *path)
 {
        struct dentry *dentry = path->dentry;
        struct mountpoint *mp;
index c15bfc966d96db7494783d374113544ca7fc7d37..f684c0cd1ec5e6ebc0e5e3c411eb932f3f2c7d79 100644 (file)
@@ -1,5 +1,11 @@
 # SPDX-License-Identifier: GPL-2.0
 
-netfs-y := read_helper.o stats.o
+netfs-y := \
+       buffered_read.o \
+       io.o \
+       main.o \
+       objects.o
+
+netfs-$(CONFIG_NETFS_STATS) += stats.o
 
 obj-$(CONFIG_NETFS_SUPPORT) := netfs.o
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
new file mode 100644 (file)
index 0000000..281a88a
--- /dev/null
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Network filesystem high-level buffered read support.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/task_io_accounting_ops.h>
+#include "internal.h"
+
+/*
+ * Unlock the folios in a read operation.  We need to set PG_fscache on any
+ * folios we're going to write back before we unlock them.
+ */
+void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
+{
+       struct netfs_io_subrequest *subreq;
+       struct folio *folio;
+       unsigned int iopos, account = 0;
+       pgoff_t start_page = rreq->start / PAGE_SIZE;
+       pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
+       bool subreq_failed = false;
+
+       XA_STATE(xas, &rreq->mapping->i_pages, start_page);
+
+       if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
+               __clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
+               list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+                       __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
+               }
+       }
+
+       /* Walk through the pagecache and the I/O request lists simultaneously.
+        * We may have a mixture of cached and uncached sections and we only
+        * really want to write out the uncached sections.  This is slightly
+        * complicated by the possibility that we might have huge pages with a
+        * mixture inside.
+        */
+       subreq = list_first_entry(&rreq->subrequests,
+                                 struct netfs_io_subrequest, rreq_link);
+       iopos = 0;
+       subreq_failed = (subreq->error < 0);
+
+       trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);
+
+       rcu_read_lock();
+       xas_for_each(&xas, folio, last_page) {
+               unsigned int pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
+               unsigned int pgend = pgpos + folio_size(folio);
+               bool pg_failed = false;
+
+               for (;;) {
+                       if (!subreq) {
+                               pg_failed = true;
+                               break;
+                       }
+                       if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
+                               folio_start_fscache(folio);
+                       pg_failed |= subreq_failed;
+                       if (pgend < iopos + subreq->len)
+                               break;
+
+                       account += subreq->transferred;
+                       iopos += subreq->len;
+                       if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
+                               subreq = list_next_entry(subreq, rreq_link);
+                               subreq_failed = (subreq->error < 0);
+                       } else {
+                               subreq = NULL;
+                               subreq_failed = false;
+                       }
+                       if (pgend == iopos)
+                               break;
+               }
+
+               if (!pg_failed) {
+                       flush_dcache_folio(folio);
+                       folio_mark_uptodate(folio);
+               }
+
+               if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
+                       if (folio_index(folio) == rreq->no_unlock_folio &&
+                           test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
+                               _debug("no unlock");
+                       else
+                               folio_unlock(folio);
+               }
+       }
+       rcu_read_unlock();
+
+       task_io_account_read(account);
+       if (rreq->netfs_ops->done)
+               rreq->netfs_ops->done(rreq);
+}
+
+static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
+                                        loff_t *_start, size_t *_len, loff_t i_size)
+{
+       struct netfs_cache_resources *cres = &rreq->cache_resources;
+
+       if (cres->ops && cres->ops->expand_readahead)
+               cres->ops->expand_readahead(cres, _start, _len, i_size);
+}
+
+static void netfs_rreq_expand(struct netfs_io_request *rreq,
+                             struct readahead_control *ractl)
+{
+       /* Give the cache a chance to change the request parameters.  The
+        * resultant request must contain the original region.
+        */
+       netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
+
+       /* Give the netfs a chance to change the request parameters.  The
+        * resultant request must contain the original region.
+        */
+       if (rreq->netfs_ops->expand_readahead)
+               rreq->netfs_ops->expand_readahead(rreq);
+
+       /* Expand the request if the cache wants it to start earlier.  Note
+        * that the expansion may get further extended if the VM wishes to
+        * insert THPs and the preferred start and/or end wind up in the middle
+        * of THPs.
+        *
+        * If this is the case, however, the THP size should be an integer
+        * multiple of the cache granule size, so we get a whole number of
+        * granules to deal with.
+        */
+       if (rreq->start  != readahead_pos(ractl) ||
+           rreq->len != readahead_length(ractl)) {
+               readahead_expand(ractl, rreq->start, rreq->len);
+               rreq->start  = readahead_pos(ractl);
+               rreq->len = readahead_length(ractl);
+
+               trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
+                                netfs_read_trace_expanded);
+       }
+}
+
+/**
+ * netfs_readahead - Helper to manage a read request
+ * @ractl: The description of the readahead request
+ *
+ * Fulfil a readahead request by drawing data from the cache if possible, or
+ * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
+ * requests from different sources will get munged together.  If necessary, the
+ * readahead window can be expanded in either direction to a more convenient
+ * alignment for RPC efficiency or to make storage in the cache feasible.
+ *
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
+ * This is usable whether or not caching is enabled.
+ */
+void netfs_readahead(struct readahead_control *ractl)
+{
+       struct netfs_io_request *rreq;
+       struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host);
+       int ret;
+
+       _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
+
+       if (readahead_count(ractl) == 0)
+               return;
+
+       rreq = netfs_alloc_request(ractl->mapping, ractl->file,
+                                  readahead_pos(ractl),
+                                  readahead_length(ractl),
+                                  NETFS_READAHEAD);
+       if (IS_ERR(rreq))
+               return;
+
+       if (ctx->ops->begin_cache_operation) {
+               ret = ctx->ops->begin_cache_operation(rreq);
+               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+                       goto cleanup_free;
+       }
+
+       netfs_stat(&netfs_n_rh_readahead);
+       trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
+                        netfs_read_trace_readahead);
+
+       netfs_rreq_expand(rreq, ractl);
+
+       /* Drop the refs on the folios here rather than in the cache or
+        * filesystem.  The locks will be dropped in netfs_rreq_unlock().
+        */
+       while (readahead_folio(ractl))
+               ;
+
+       netfs_begin_read(rreq, false);
+       return;
+
+cleanup_free:
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
+       return;
+}
+EXPORT_SYMBOL(netfs_readahead);
+
+/**
+ * netfs_readpage - Helper to manage a readpage request
+ * @file: The file to read from
+ * @subpage: A subpage of the folio to read
+ *
+ * Fulfil a readpage request by drawing data from the cache if possible, or the
+ * netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O requests
+ * from different sources will get munged together.
+ *
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
+ * This is usable whether or not caching is enabled.
+ */
+int netfs_readpage(struct file *file, struct page *subpage)
+{
+       struct folio *folio = page_folio(subpage);
+       struct address_space *mapping = folio_file_mapping(folio);
+       struct netfs_io_request *rreq;
+       struct netfs_i_context *ctx = netfs_i_context(mapping->host);
+       int ret;
+
+       _enter("%lx", folio_index(folio));
+
+       rreq = netfs_alloc_request(mapping, file,
+                                  folio_file_pos(folio), folio_size(folio),
+                                  NETFS_READPAGE);
+       if (IS_ERR(rreq)) {
+               ret = PTR_ERR(rreq);
+               goto alloc_error;
+       }
+
+       if (ctx->ops->begin_cache_operation) {
+               ret = ctx->ops->begin_cache_operation(rreq);
+               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+                       goto discard;
+       }
+
+       netfs_stat(&netfs_n_rh_readpage);
+       trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
+       return netfs_begin_read(rreq, true);
+
+discard:
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
+alloc_error:
+       folio_unlock(folio);
+       return ret;
+}
+EXPORT_SYMBOL(netfs_readpage);
+
+/*
+ * Prepare a folio for writing without reading first
+ * @folio: The folio being prepared
+ * @pos: starting position for the write
+ * @len: length of write
+ * @always_fill: T if the folio should always be completely filled/cleared
+ *
+ * In some cases, write_begin doesn't need to read at all:
+ * - full folio write
+ * - write that lies in a folio that is completely beyond EOF
+ * - write that covers the folio from start to EOF or beyond it
+ *
+ * If any of these criteria are met, then zero out the unwritten parts
+ * of the folio and return true. Otherwise, return false.
+ */
+static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
+                                bool always_fill)
+{
+       struct inode *inode = folio_inode(folio);
+       loff_t i_size = i_size_read(inode);
+       size_t offset = offset_in_folio(folio, pos);
+       size_t plen = folio_size(folio);
+
+       if (unlikely(always_fill)) {
+               if (pos - offset + len <= i_size)
+                       return false; /* Page entirely before EOF */
+               zero_user_segment(&folio->page, 0, plen);
+               folio_mark_uptodate(folio);
+               return true;
+       }
+
+       /* Full folio write */
+       if (offset == 0 && len >= plen)
+               return true;
+
+       /* Page entirely beyond the end of the file */
+       if (pos - offset >= i_size)
+               goto zero_out;
+
+       /* Write that covers from the start of the folio to EOF or beyond */
+       if (offset == 0 && (pos + len) >= i_size)
+               goto zero_out;
+
+       return false;
+zero_out:
+       zero_user_segments(&folio->page, 0, offset, offset + len, plen);
+       return true;
+}
+
+/**
+ * netfs_write_begin - Helper to prepare for writing
+ * @file: The file to read from
+ * @mapping: The mapping to read from
+ * @pos: File position at which the write will begin
+ * @len: The length of the write (may extend beyond the end of the folio chosen)
+ * @aop_flags: AOP_* flags
+ * @_folio: Where to put the resultant folio
+ * @_fsdata: Place for the netfs to store a cookie
+ *
+ * Pre-read data for a write-begin request by drawing data from the cache if
+ * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
+ * Multiple I/O requests from different sources will get munged together.  If
+ * necessary, the readahead window can be expanded in either direction to a
+ * more convenient alignment for RPC efficiency or to make storage in the cache
+ * feasible.
+ *
+ * The calling netfs must provide a table of operations, only one of which,
+ * issue_read, is mandatory.
+ *
+ * The check_write_begin() operation can be provided to check for and flush
+ * conflicting writes once the folio is grabbed and locked.  It is passed a
+ * pointer to the fsdata cookie that gets returned to the VM to be passed to
+ * write_end.  It is permitted to sleep.  It should return 0 if the request
+ * should go ahead; unlock the folio and return -EAGAIN to cause the folio to
+ * be re-fetched; or return an error.
+ *
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
+ * This is usable whether or not caching is enabled.
+ */
+int netfs_write_begin(struct file *file, struct address_space *mapping,
+                     loff_t pos, unsigned int len, unsigned int aop_flags,
+                     struct folio **_folio, void **_fsdata)
+{
+       struct netfs_io_request *rreq;
+       struct netfs_i_context *ctx = netfs_i_context(file_inode(file));
+       struct folio *folio;
+       unsigned int fgp_flags;
+       pgoff_t index = pos >> PAGE_SHIFT;
+       int ret;
+
+       DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
+
+retry:
+       fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
+       if (aop_flags & AOP_FLAG_NOFS)
+               fgp_flags |= FGP_NOFS;
+       folio = __filemap_get_folio(mapping, index, fgp_flags,
+                                   mapping_gfp_mask(mapping));
+       if (!folio)
+               return -ENOMEM;
+
+       if (ctx->ops->check_write_begin) {
+               /* Allow the netfs (e.g. ceph) to flush conflicts. */
+               ret = ctx->ops->check_write_begin(file, pos, len, folio, _fsdata);
+               if (ret < 0) {
+                       trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
+                       if (ret == -EAGAIN)
+                               goto retry;
+                       goto error;
+               }
+       }
+
+       if (folio_test_uptodate(folio))
+               goto have_folio;
+
+       /* If the page is beyond the EOF, we want to clear it - unless it's
+        * within the cache granule containing the EOF, in which case we need
+        * to preload the granule.
+        */
+       if (!netfs_is_cache_enabled(ctx) &&
+           netfs_skip_folio_read(folio, pos, len, false)) {
+               netfs_stat(&netfs_n_rh_write_zskip);
+               goto have_folio_no_wait;
+       }
+
+       rreq = netfs_alloc_request(mapping, file,
+                                  folio_file_pos(folio), folio_size(folio),
+                                  NETFS_READ_FOR_WRITE);
+       if (IS_ERR(rreq)) {
+               ret = PTR_ERR(rreq);
+               goto error;
+       }
+       rreq->no_unlock_folio   = folio_index(folio);
+       __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
+
+       if (ctx->ops->begin_cache_operation) {
+               ret = ctx->ops->begin_cache_operation(rreq);
+               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+                       goto error_put;
+       }
+
+       netfs_stat(&netfs_n_rh_write_begin);
+       trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
+
+       /* Expand the request to meet caching requirements and download
+        * preferences.
+        */
+       ractl._nr_pages = folio_nr_pages(folio);
+       netfs_rreq_expand(rreq, &ractl);
+
+       /* We hold the folio locks, so we can drop the references */
+       folio_get(folio);
+       while (readahead_folio(&ractl))
+               ;
+
+       ret = netfs_begin_read(rreq, true);
+       if (ret < 0)
+               goto error;
+
+have_folio:
+       ret = folio_wait_fscache_killable(folio);
+       if (ret < 0)
+               goto error;
+have_folio_no_wait:
+       *_folio = folio;
+       _leave(" = 0");
+       return 0;
+
+error_put:
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
+error:
+       folio_unlock(folio);
+       folio_put(folio);
+       _leave(" = %d", ret);
+       return ret;
+}
+EXPORT_SYMBOL(netfs_write_begin);
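
These three exported helpers are the library's whole buffered-read entry surface. A hedged sketch of how a client filesystem might wire them up; "myfs" and the body of myfs_write_begin() are invented for illustration, and only the netfs_* calls come from this file:

	/* Sketch: plugging the helpers into address_space_operations. */
	static int myfs_write_begin(struct file *file, struct address_space *mapping,
				    loff_t pos, unsigned int len, unsigned int flags,
				    struct page **pagep, void **fsdata)
	{
		struct folio *folio;
		int ret = netfs_write_begin(file, mapping, pos, len, flags,
					    &folio, fsdata);

		if (ret == 0)
			*pagep = &folio->page;
		return ret;
	}

	static const struct address_space_operations myfs_aops = {
		.readpage	= netfs_readpage,
		.readahead	= netfs_readahead,
		.write_begin	= myfs_write_begin,
		/* .write_end, .writepages, etc. supplied by the filesystem */
	};
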
index b7f2c4459f33683cf80a1591ca9b586fb9bcb37f..b7b0e3d18d9e8583d9b2bceb6e9661e8768f3ac6 100644 (file)
@@ -5,6 +5,10 @@
  * Written by David Howells (dhowells@redhat.com)
  */
 
+#include <linux/netfs.h>
+#include <linux/fscache.h>
+#include <trace/events/netfs.h>
+
 #ifdef pr_fmt
 #undef pr_fmt
 #endif
 #define pr_fmt(fmt) "netfs: " fmt
 
 /*
- * read_helper.c
+ * buffered_read.c
+ */
+void netfs_rreq_unlock_folios(struct netfs_io_request *rreq);
+
+/*
+ * io.c
+ */
+int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
+
+/*
+ * main.c
  */
 extern unsigned int netfs_debug;
 
+/*
+ * objects.c
+ */
+struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
+                                            struct file *file,
+                                            loff_t start, size_t len,
+                                            enum netfs_io_origin origin);
+void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
+void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
+void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
+                      enum netfs_rreq_ref_trace what);
+struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);
+
+static inline void netfs_see_request(struct netfs_io_request *rreq,
+                                    enum netfs_rreq_ref_trace what)
+{
+       trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
+}
+
 /*
  * stats.c
  */
@@ -55,6 +88,21 @@ static inline void netfs_stat_d(atomic_t *stat)
 #define netfs_stat_d(x) do {} while(0)
 #endif
 
+/*
+ * Miscellaneous functions.
+ */
+static inline bool netfs_is_cache_enabled(struct netfs_i_context *ctx)
+{
+#if IS_ENABLED(CONFIG_FSCACHE)
+       struct fscache_cookie *cookie = ctx->cache;
+
+       return fscache_cookie_valid(cookie) && cookie->cache_priv &&
+               fscache_cookie_enabled(cookie);
+#else
+       return false;
+#endif
+}
+
 /*****************************************************************************/
 /*
  * debug tracing
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
new file mode 100644 (file)
index 0000000..4289258
--- /dev/null
@@ -0,0 +1,657 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Network filesystem high-level read support.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/sched/mm.h>
+#include <linux/task_io_accounting_ops.h>
+#include "internal.h"
+
+/*
+ * Clear the unread part of an I/O request.
+ */
+static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
+{
+       struct iov_iter iter;
+
+       iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages,
+                       subreq->start + subreq->transferred,
+                       subreq->len   - subreq->transferred);
+       iov_iter_zero(iov_iter_count(&iter), &iter);
+}
+
+static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
+                                       bool was_async)
+{
+       struct netfs_io_subrequest *subreq = priv;
+
+       netfs_subreq_terminated(subreq, transferred_or_error, was_async);
+}
+
+/*
+ * Issue a read against the cache.
+ * - Eats the caller's ref on subreq.
+ */
+static void netfs_read_from_cache(struct netfs_io_request *rreq,
+                                 struct netfs_io_subrequest *subreq,
+                                 enum netfs_read_from_hole read_hole)
+{
+       struct netfs_cache_resources *cres = &rreq->cache_resources;
+       struct iov_iter iter;
+
+       netfs_stat(&netfs_n_rh_read);
+       iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
+                       subreq->start + subreq->transferred,
+                       subreq->len   - subreq->transferred);
+
+       cres->ops->read(cres, subreq->start, &iter, read_hole,
+                       netfs_cache_read_terminated, subreq);
+}
+
+/*
+ * Fill a subrequest region with zeroes.
+ */
+static void netfs_fill_with_zeroes(struct netfs_io_request *rreq,
+                                  struct netfs_io_subrequest *subreq)
+{
+       netfs_stat(&netfs_n_rh_zero);
+       __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+       netfs_subreq_terminated(subreq, 0, false);
+}
+
+/*
+ * Ask the netfs to issue a read request to the server for us.
+ *
+ * The netfs is expected to read from subreq->pos + subreq->transferred to
+ * subreq->pos + subreq->len - 1.  It may not backtrack and write data into the
+ * buffer prior to the transferred point as it might clobber dirty data
+ * obtained from the cache.
+ *
+ * Alternatively, the netfs is allowed to indicate one of two things:
+ *
+ * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
+ *   make progress.
+ *
+ * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
+ *   cleared.
+ */
+static void netfs_read_from_server(struct netfs_io_request *rreq,
+                                  struct netfs_io_subrequest *subreq)
+{
+       netfs_stat(&netfs_n_rh_download);
+       rreq->netfs_ops->issue_read(subreq);
+}
+
+/*
+ * Release those waiting.
+ */
+static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
+{
+       trace_netfs_rreq(rreq, netfs_rreq_trace_done);
+       netfs_clear_subrequests(rreq, was_async);
+       netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
+}
+
+/*
+ * Deal with the completion of writing the data to the cache.  We have to clear
+ * the PG_fscache bits on the folios involved and release the caller's ref.
+ *
+ * May be called in softirq mode and we inherit a ref from the caller.
+ */
+static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
+                                         bool was_async)
+{
+       struct netfs_io_subrequest *subreq;
+       struct folio *folio;
+       pgoff_t unlocked = 0;
+       bool have_unlocked = false;
+
+       rcu_read_lock();
+
+       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+               XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
+
+               xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
+                       /* We might have multiple writes from the same huge
+                        * folio, but we mustn't unlock a folio more than once.
+                        */
+                       if (have_unlocked && folio_index(folio) <= unlocked)
+                               continue;
+                       unlocked = folio_index(folio);
+                       folio_end_fscache(folio);
+                       have_unlocked = true;
+               }
+       }
+
+       rcu_read_unlock();
+       netfs_rreq_completed(rreq, was_async);
+}
+
+static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
+                                      bool was_async)
+{
+       struct netfs_io_subrequest *subreq = priv;
+       struct netfs_io_request *rreq = subreq->rreq;
+
+       if (IS_ERR_VALUE(transferred_or_error)) {
+               netfs_stat(&netfs_n_rh_write_failed);
+               trace_netfs_failure(rreq, subreq, transferred_or_error,
+                                   netfs_fail_copy_to_cache);
+       } else {
+               netfs_stat(&netfs_n_rh_write_done);
+       }
+
+       trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
+
+       /* If we decrement nr_copy_ops to 0, the ref belongs to us. */
+       if (atomic_dec_and_test(&rreq->nr_copy_ops))
+               netfs_rreq_unmark_after_write(rreq, was_async);
+
+       netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+}
+
+/*
+ * Perform any outstanding writes to the cache.  We inherit a ref from the
+ * caller.
+ */
+static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
+{
+       struct netfs_cache_resources *cres = &rreq->cache_resources;
+       struct netfs_io_subrequest *subreq, *next, *p;
+       struct iov_iter iter;
+       int ret;
+
+       trace_netfs_rreq(rreq, netfs_rreq_trace_copy);
+
+       /* We don't want terminating writes trying to wake us up whilst we're
+        * still going through the list.
+        */
+       atomic_inc(&rreq->nr_copy_ops);
+
+       list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
+               if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
+                       list_del_init(&subreq->rreq_link);
+                       netfs_put_subrequest(subreq, false,
+                                            netfs_sreq_trace_put_no_copy);
+               }
+       }
+
+       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+               /* Amalgamate adjacent writes */
+               while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
+                       next = list_next_entry(subreq, rreq_link);
+                       if (next->start != subreq->start + subreq->len)
+                               break;
+                       subreq->len += next->len;
+                       list_del_init(&next->rreq_link);
+                       netfs_put_subrequest(next, false,
+                                            netfs_sreq_trace_put_merged);
+               }
+
+               ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
+                                              rreq->i_size, true);
+               if (ret < 0) {
+                       trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
+                       trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
+                       continue;
+               }
+
+               iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
+                               subreq->start, subreq->len);
+
+               atomic_inc(&rreq->nr_copy_ops);
+               netfs_stat(&netfs_n_rh_write);
+               netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
+               trace_netfs_sreq(subreq, netfs_sreq_trace_write);
+               cres->ops->write(cres, subreq->start, &iter,
+                                netfs_rreq_copy_terminated, subreq);
+       }
+
+       /* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
+       if (atomic_dec_and_test(&rreq->nr_copy_ops))
+               netfs_rreq_unmark_after_write(rreq, false);
+}
+
+static void netfs_rreq_write_to_cache_work(struct work_struct *work)
+{
+       struct netfs_io_request *rreq =
+               container_of(work, struct netfs_io_request, work);
+
+       netfs_rreq_do_write_to_cache(rreq);
+}
+
+static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq)
+{
+       rreq->work.func = netfs_rreq_write_to_cache_work;
+       if (!queue_work(system_unbound_wq, &rreq->work))
+               BUG();
+}
+
+/*
+ * Handle a short read.
+ */
+static void netfs_rreq_short_read(struct netfs_io_request *rreq,
+                                 struct netfs_io_subrequest *subreq)
+{
+       __clear_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
+       __set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);
+
+       netfs_stat(&netfs_n_rh_short_read);
+       trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);
+
+       netfs_get_subrequest(subreq, netfs_sreq_trace_get_short_read);
+       atomic_inc(&rreq->nr_outstanding);
+       if (subreq->source == NETFS_READ_FROM_CACHE)
+               netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
+       else
+               netfs_read_from_server(rreq, subreq);
+}
+
+/*
+ * Resubmit any short or failed operations.  Returns true if we got the rreq
+ * ref back.
+ */
+static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
+{
+       struct netfs_io_subrequest *subreq;
+
+       WARN_ON(in_interrupt());
+
+       trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
+
+       /* We don't want terminating submissions trying to wake us up whilst
+        * we're still going through the list.
+        */
+       atomic_inc(&rreq->nr_outstanding);
+
+       __clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
+       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+               if (subreq->error) {
+                       if (subreq->source != NETFS_READ_FROM_CACHE)
+                               break;
+                       subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
+                       subreq->error = 0;
+                       netfs_stat(&netfs_n_rh_download_instead);
+                       trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
+                       netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+                       atomic_inc(&rreq->nr_outstanding);
+                       netfs_read_from_server(rreq, subreq);
+               } else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
+                       netfs_rreq_short_read(rreq, subreq);
+               }
+       }
+
+       /* If we decrement nr_outstanding to 0, the usage ref belongs to us. */
+       if (atomic_dec_and_test(&rreq->nr_outstanding))
+               return true;
+
+       wake_up_var(&rreq->nr_outstanding);
+       return false;
+}
+
+/*
+ * Check to see if the data read is still valid.
+ */
+static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq)
+{
+       struct netfs_io_subrequest *subreq;
+
+       if (!rreq->netfs_ops->is_still_valid ||
+           rreq->netfs_ops->is_still_valid(rreq))
+               return;
+
+       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+               if (subreq->source == NETFS_READ_FROM_CACHE) {
+                       subreq->error = -ESTALE;
+                       __set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
+               }
+       }
+}
+
+/*
+ * Assess the state of a read request and decide what to do next.
+ *
+ * Note that we could be in an ordinary kernel thread, on a workqueue or in
+ * softirq context at this point.  We inherit a ref from the caller.
+ */
+static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
+{
+       trace_netfs_rreq(rreq, netfs_rreq_trace_assess);
+
+again:
+       netfs_rreq_is_still_valid(rreq);
+
+       if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
+           test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
+               if (netfs_rreq_perform_resubmissions(rreq))
+                       goto again;
+               return;
+       }
+
+       netfs_rreq_unlock_folios(rreq);
+
+       clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+       wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
+
+       if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags))
+               return netfs_rreq_write_to_cache(rreq);
+
+       netfs_rreq_completed(rreq, was_async);
+}
+
+static void netfs_rreq_work(struct work_struct *work)
+{
+       struct netfs_io_request *rreq =
+               container_of(work, struct netfs_io_request, work);
+       netfs_rreq_assess(rreq, false);
+}
+
+/*
+ * Handle the completion of all outstanding I/O operations on a read request.
+ * We inherit a ref from the caller.
+ */
+static void netfs_rreq_terminated(struct netfs_io_request *rreq,
+                                 bool was_async)
+{
+       if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
+           was_async) {
+               if (!queue_work(system_unbound_wq, &rreq->work))
+                       BUG();
+       } else {
+               netfs_rreq_assess(rreq, was_async);
+       }
+}
+
+/**
+ * netfs_subreq_terminated - Note the termination of an I/O operation.
+ * @subreq: The I/O request that has terminated.
+ * @transferred_or_error: The amount of data transferred or an error code.
+ * @was_async: The termination was asynchronous
+ *
+ * This tells the read helper that a contributory I/O operation has terminated,
+ * one way or another, and that it should integrate the results.
+ *
+ * The caller indicates in @transferred_or_error the outcome of the operation,
+ * supplying a positive value to indicate the number of bytes transferred, 0 to
+ * indicate a retryable failure to transfer anything, or a negative
+ * error code.  The helper will look after reissuing I/O operations as
+ * appropriate and writing downloaded data to the cache.
+ *
+ * If @was_async is true, the caller might be running in softirq or interrupt
+ * context and we can't sleep.
+ */
+void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
+                            ssize_t transferred_or_error,
+                            bool was_async)
+{
+       struct netfs_io_request *rreq = subreq->rreq;
+       int u;
+
+       _enter("[%u]{%llx,%lx},%zd",
+              subreq->debug_index, subreq->start, subreq->flags,
+              transferred_or_error);
+
+       switch (subreq->source) {
+       case NETFS_READ_FROM_CACHE:
+               netfs_stat(&netfs_n_rh_read_done);
+               break;
+       case NETFS_DOWNLOAD_FROM_SERVER:
+               netfs_stat(&netfs_n_rh_download_done);
+               break;
+       default:
+               break;
+       }
+
+       if (IS_ERR_VALUE(transferred_or_error)) {
+               subreq->error = transferred_or_error;
+               trace_netfs_failure(rreq, subreq, transferred_or_error,
+                                   netfs_fail_read);
+               goto failed;
+       }
+
+       if (WARN(transferred_or_error > subreq->len - subreq->transferred,
+                "Subreq overread: R%x[%x] %zd > %zu - %zu",
+                rreq->debug_id, subreq->debug_index,
+                transferred_or_error, subreq->len, subreq->transferred))
+               transferred_or_error = subreq->len - subreq->transferred;
+
+       subreq->error = 0;
+       subreq->transferred += transferred_or_error;
+       if (subreq->transferred < subreq->len)
+               goto incomplete;
+
+complete:
+       __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+       if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
+               set_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
+
+out:
+       trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
+
+       /* If we decrement nr_outstanding to 0, the ref belongs to us. */
+       u = atomic_dec_return(&rreq->nr_outstanding);
+       if (u == 0)
+               netfs_rreq_terminated(rreq, was_async);
+       else if (u == 1)
+               wake_up_var(&rreq->nr_outstanding);
+
+       netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+       return;
+
+incomplete:
+       if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
+               netfs_clear_unread(subreq);
+               subreq->transferred = subreq->len;
+               goto complete;
+       }
+
+       if (transferred_or_error == 0) {
+               if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
+                       subreq->error = -ENODATA;
+                       goto failed;
+               }
+       } else {
+               __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+       }
+
+       __set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
+       set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
+       goto out;
+
+failed:
+       if (subreq->source == NETFS_READ_FROM_CACHE) {
+               netfs_stat(&netfs_n_rh_read_failed);
+               set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
+       } else {
+               netfs_stat(&netfs_n_rh_download_failed);
+               set_bit(NETFS_RREQ_FAILED, &rreq->flags);
+               rreq->error = subreq->error;
+       }
+       goto out;
+}
+EXPORT_SYMBOL(netfs_subreq_terminated);
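
Together with ->issue_read() (invoked from netfs_read_from_server() above), this defines the contract between the library and the filesystem. A minimal sketch of the netfs side, assuming a hypothetical synchronous transport helper myfs_fetch(); everything except netfs_subreq_terminated() is invented:

	/* Sketch: a netfs ->issue_read() hook.  Fetch only the untransferred
	 * tail of the subrequest, then report the outcome to the library.
	 */
	static void myfs_issue_read(struct netfs_io_subrequest *subreq)
	{
		ssize_t ret;

		ret = myfs_fetch(subreq->rreq->inode,
				 subreq->start + subreq->transferred,
				 subreq->len - subreq->transferred);

		/* ret > 0: bytes read; 0: nothing (retryable); < 0: error */
		netfs_subreq_terminated(subreq, ret, false);
	}
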
+
+static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest *subreq,
+                                                      loff_t i_size)
+{
+       struct netfs_io_request *rreq = subreq->rreq;
+       struct netfs_cache_resources *cres = &rreq->cache_resources;
+
+       if (cres->ops)
+               return cres->ops->prepare_read(subreq, i_size);
+       if (subreq->start >= rreq->i_size)
+               return NETFS_FILL_WITH_ZEROES;
+       return NETFS_DOWNLOAD_FROM_SERVER;
+}
+
+/*
+ * Work out what sort of subrequest the next one will be.
+ */
+static enum netfs_io_source
+netfs_rreq_prepare_read(struct netfs_io_request *rreq,
+                       struct netfs_io_subrequest *subreq)
+{
+       enum netfs_io_source source;
+
+       _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
+
+       source = netfs_cache_prepare_read(subreq, rreq->i_size);
+       if (source == NETFS_INVALID_READ)
+               goto out;
+
+       if (source == NETFS_DOWNLOAD_FROM_SERVER) {
+               /* Call out to the netfs to let it shrink the request to fit
+                * its own I/O sizes and boundaries.  If it shrinks it here, it
+                * will be called again to make simultaneous calls; if it wants
+                * to make serial calls, it can indicate a short read and then
+                * we will call it again.
+                */
+               if (subreq->len > rreq->i_size - subreq->start)
+                       subreq->len = rreq->i_size - subreq->start;
+
+               if (rreq->netfs_ops->clamp_length &&
+                   !rreq->netfs_ops->clamp_length(subreq)) {
+                       source = NETFS_INVALID_READ;
+                       goto out;
+               }
+       }
+
+       if (WARN_ON(subreq->len == 0))
+               source = NETFS_INVALID_READ;
+
+out:
+       subreq->source = source;
+       trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+       return source;
+}
+
+/*
+ * Slice off a piece of a read request and submit an I/O request for it.
+ */
+static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
+                                   unsigned int *_debug_index)
+{
+       struct netfs_io_subrequest *subreq;
+       enum netfs_io_source source;
+
+       subreq = netfs_alloc_subrequest(rreq);
+       if (!subreq)
+               return false;
+
+       subreq->debug_index     = (*_debug_index)++;
+       subreq->start           = rreq->start + rreq->submitted;
+       subreq->len             = rreq->len   - rreq->submitted;
+
+       _debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
+       list_add_tail(&subreq->rreq_link, &rreq->subrequests);
+
+       /* Call out to the cache to find out what it can do with the remaining
+        * subset.  It tells us in subreq->flags what it decided should be done
+        * and adjusts subreq->len down if the subset crosses a cache boundary.
+        *
+        * Then, when we hand the subrequest over, it can choose to take a
+        * subset of that (the starts must coincide), in which case we go
+        * around the loop again and ask it to download the next piece.
+        */
+       source = netfs_rreq_prepare_read(rreq, subreq);
+       if (source == NETFS_INVALID_READ)
+               goto subreq_failed;
+
+       atomic_inc(&rreq->nr_outstanding);
+
+       rreq->submitted += subreq->len;
+
+       trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+       switch (source) {
+       case NETFS_FILL_WITH_ZEROES:
+               netfs_fill_with_zeroes(rreq, subreq);
+               break;
+       case NETFS_DOWNLOAD_FROM_SERVER:
+               netfs_read_from_server(rreq, subreq);
+               break;
+       case NETFS_READ_FROM_CACHE:
+               netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
+               break;
+       default:
+               BUG();
+       }
+
+       return true;
+
+subreq_failed:
+       rreq->error = subreq->error;
+       netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_failed);
+       return false;
+}
+
+/*
+ * Begin the process of reading in a chunk of data, where that data may be
+ * stitched together from multiple sources, including multiple servers and the
+ * local cache.
+ */
+int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
+{
+       unsigned int debug_index = 0;
+       int ret;
+
+       _enter("R=%x %llx-%llx",
+              rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
+
+       if (rreq->len == 0) {
+               pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
+               netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
+               return -EIO;
+       }
+
+       INIT_WORK(&rreq->work, netfs_rreq_work);
+
+       if (sync)
+               netfs_get_request(rreq, netfs_rreq_trace_get_hold);
+
+       /* Chop the read into slices according to what the cache and the netfs
+        * want and submit each one.
+        */
+       atomic_set(&rreq->nr_outstanding, 1);
+       do {
+               if (!netfs_rreq_submit_slice(rreq, &debug_index))
+                       break;
+
+       } while (rreq->submitted < rreq->len);
+
+       if (sync) {
+               /* Keep nr_outstanding incremented so that the ref always belongs to
+                * us, and the service code isn't punted off to a random thread pool to
+                * process.
+                */
+               for (;;) {
+                       wait_var_event(&rreq->nr_outstanding,
+                                      atomic_read(&rreq->nr_outstanding) == 1);
+                       netfs_rreq_assess(rreq, false);
+                       if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
+                               break;
+                       cond_resched();
+               }
+
+               ret = rreq->error;
+               if (ret == 0 && rreq->submitted < rreq->len) {
+                       trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
+                       ret = -EIO;
+               }
+               netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
+       } else {
+               /* If we decrement nr_outstanding to 0, the ref belongs to us. */
+               if (atomic_dec_and_test(&rreq->nr_outstanding))
+                       netfs_rreq_assess(rreq, false);
+               ret = 0;
+       }
+       return ret;
+}
diff --git a/fs/netfs/main.c b/fs/netfs/main.c
new file mode 100644 (file)
index 0000000..0685687
--- /dev/null
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Miscellaneous bits for the netfs support library.
+ *
+ * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/module.h>
+#include <linux/export.h>
+#include "internal.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/netfs.h>
+
+MODULE_DESCRIPTION("Network fs support");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
+unsigned netfs_debug;
+module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
new file mode 100644 (file)
index 0000000..e86107b
--- /dev/null
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Object lifetime handling and tracing.
+ *
+ * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/slab.h>
+#include "internal.h"
+
+/*
+ * Allocate an I/O request and initialise it.
+ */
+struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
+                                            struct file *file,
+                                            loff_t start, size_t len,
+                                            enum netfs_io_origin origin)
+{
+       static atomic_t debug_ids;
+       struct inode *inode = file ? file_inode(file) : mapping->host;
+       struct netfs_i_context *ctx = netfs_i_context(inode);
+       struct netfs_io_request *rreq;
+       int ret;
+
+       rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
+       if (!rreq)
+               return ERR_PTR(-ENOMEM);
+
+       rreq->start     = start;
+       rreq->len       = len;
+       rreq->origin    = origin;
+       rreq->netfs_ops = ctx->ops;
+       rreq->mapping   = mapping;
+       rreq->inode     = inode;
+       rreq->i_size    = i_size_read(inode);
+       rreq->debug_id  = atomic_inc_return(&debug_ids);
+       INIT_LIST_HEAD(&rreq->subrequests);
+       refcount_set(&rreq->ref, 1);
+       __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+       if (rreq->netfs_ops->init_request) {
+               ret = rreq->netfs_ops->init_request(rreq, file);
+               if (ret < 0) {
+                       kfree(rreq);
+                       return ERR_PTR(ret);
+               }
+       }
+
+       netfs_stat(&netfs_n_rh_rreq);
+       return rreq;
+}
+
+void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
+{
+       int r;
+
+       __refcount_inc(&rreq->ref, &r);
+       trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
+}
+
+void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
+{
+       struct netfs_io_subrequest *subreq;
+
+       while (!list_empty(&rreq->subrequests)) {
+               subreq = list_first_entry(&rreq->subrequests,
+                                         struct netfs_io_subrequest, rreq_link);
+               list_del(&subreq->rreq_link);
+               netfs_put_subrequest(subreq, was_async,
+                                    netfs_sreq_trace_put_clear);
+       }
+}
+
+static void netfs_free_request(struct work_struct *work)
+{
+       struct netfs_io_request *rreq =
+               container_of(work, struct netfs_io_request, work);
+
+       netfs_clear_subrequests(rreq, false);
+       if (rreq->netfs_priv)
+               rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
+       trace_netfs_rreq(rreq, netfs_rreq_trace_free);
+       if (rreq->cache_resources.ops)
+               rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
+       kfree(rreq);
+       netfs_stat_d(&netfs_n_rh_rreq);
+}
+
+void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
+                      enum netfs_rreq_ref_trace what)
+{
+       unsigned int debug_id = rreq->debug_id;
+       bool dead;
+       int r;
+
+       dead = __refcount_dec_and_test(&rreq->ref, &r);
+       trace_netfs_rreq_ref(debug_id, r - 1, what);
+       if (dead) {
+               if (was_async) {
+                       rreq->work.func = netfs_free_request;
+                       if (!queue_work(system_unbound_wq, &rreq->work))
+                               BUG();
+               } else {
+                       netfs_free_request(&rreq->work);
+               }
+       }
+}
+
+/*
+ * Allocate and partially initialise an I/O subrequest structure.
+ */
+struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
+{
+       struct netfs_io_subrequest *subreq;
+
+       subreq = kzalloc(sizeof(struct netfs_io_subrequest), GFP_KERNEL);
+       if (subreq) {
+               INIT_LIST_HEAD(&subreq->rreq_link);
+               refcount_set(&subreq->ref, 2);
+               subreq->rreq = rreq;
+               netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
+               netfs_stat(&netfs_n_rh_sreq);
+       }
+
+       return subreq;
+}
+
+void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
+                         enum netfs_sreq_ref_trace what)
+{
+       int r;
+
+       __refcount_inc(&subreq->ref, &r);
+       trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1,
+                            what);
+}
+
+static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
+                                 bool was_async)
+{
+       struct netfs_io_request *rreq = subreq->rreq;
+
+       trace_netfs_sreq(subreq, netfs_sreq_trace_free);
+       kfree(subreq);
+       netfs_stat_d(&netfs_n_rh_sreq);
+       netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
+}
+
+void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async,
+                         enum netfs_sreq_ref_trace what)
+{
+       unsigned int debug_index = subreq->debug_index;
+       unsigned int debug_id = subreq->rreq->debug_id;
+       bool dead;
+       int r;
+
+       dead = __refcount_dec_and_test(&subreq->ref, &r);
+       trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
+       if (dead)
+               netfs_free_subrequest(subreq, was_async);
+}
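
The scheme above is strict about ownership: netfs_alloc_request() returns a request holding one reference, every subrequest pins its parent request, and each get/put is mirrored by a trace event. Below is a minimal usage sketch of that discipline, assuming the internal declarations from fs/netfs/internal.h; the origin value NETFS_READAHEAD and the trace tag netfs_rreq_trace_put_discard are illustrative assumptions, not taken from this hunk.

/* Hedged sketch, not part of the patch. */
#include "internal.h"

static int example_read(struct address_space *mapping, struct file *file,
                        loff_t start, size_t len)
{
        struct netfs_io_request *rreq;

        rreq = netfs_alloc_request(mapping, file, start, len, NETFS_READAHEAD);
        if (IS_ERR(rreq))
                return PTR_ERR(rreq);

        /* ... slice into pieces via netfs_alloc_subrequest(rreq) ... */

        /* Drop the allocation reference; pass true for was_async if this
         * could run in softirq context, so freeing is punted to a workqueue.
         */
        netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
        return 0;
}
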
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
deleted file mode 100644 (file)
index 501da99..0000000
+++ /dev/null
@@ -1,1205 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Network filesystem high-level read support.
- *
- * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#include <linux/module.h>
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <linux/uio.h>
-#include <linux/sched/mm.h>
-#include <linux/task_io_accounting_ops.h>
-#include <linux/netfs.h>
-#include "internal.h"
-#define CREATE_TRACE_POINTS
-#include <trace/events/netfs.h>
-
-MODULE_DESCRIPTION("Network fs support");
-MODULE_AUTHOR("Red Hat, Inc.");
-MODULE_LICENSE("GPL");
-
-unsigned netfs_debug;
-module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
-
-static void netfs_rreq_work(struct work_struct *);
-static void __netfs_put_subrequest(struct netfs_read_subrequest *, bool);
-
-static void netfs_put_subrequest(struct netfs_read_subrequest *subreq,
-                                bool was_async)
-{
-       if (refcount_dec_and_test(&subreq->usage))
-               __netfs_put_subrequest(subreq, was_async);
-}
-
-static struct netfs_read_request *netfs_alloc_read_request(
-       const struct netfs_read_request_ops *ops, void *netfs_priv,
-       struct file *file)
-{
-       static atomic_t debug_ids;
-       struct netfs_read_request *rreq;
-
-       rreq = kzalloc(sizeof(struct netfs_read_request), GFP_KERNEL);
-       if (rreq) {
-               rreq->netfs_ops = ops;
-               rreq->netfs_priv = netfs_priv;
-               rreq->inode     = file_inode(file);
-               rreq->i_size    = i_size_read(rreq->inode);
-               rreq->debug_id  = atomic_inc_return(&debug_ids);
-               INIT_LIST_HEAD(&rreq->subrequests);
-               INIT_WORK(&rreq->work, netfs_rreq_work);
-               refcount_set(&rreq->usage, 1);
-               __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
-               if (ops->init_rreq)
-                       ops->init_rreq(rreq, file);
-               netfs_stat(&netfs_n_rh_rreq);
-       }
-
-       return rreq;
-}
-
-static void netfs_get_read_request(struct netfs_read_request *rreq)
-{
-       refcount_inc(&rreq->usage);
-}
-
-static void netfs_rreq_clear_subreqs(struct netfs_read_request *rreq,
-                                    bool was_async)
-{
-       struct netfs_read_subrequest *subreq;
-
-       while (!list_empty(&rreq->subrequests)) {
-               subreq = list_first_entry(&rreq->subrequests,
-                                         struct netfs_read_subrequest, rreq_link);
-               list_del(&subreq->rreq_link);
-               netfs_put_subrequest(subreq, was_async);
-       }
-}
-
-static void netfs_free_read_request(struct work_struct *work)
-{
-       struct netfs_read_request *rreq =
-               container_of(work, struct netfs_read_request, work);
-       netfs_rreq_clear_subreqs(rreq, false);
-       if (rreq->netfs_priv)
-               rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
-       trace_netfs_rreq(rreq, netfs_rreq_trace_free);
-       if (rreq->cache_resources.ops)
-               rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
-       kfree(rreq);
-       netfs_stat_d(&netfs_n_rh_rreq);
-}
-
-static void netfs_put_read_request(struct netfs_read_request *rreq, bool was_async)
-{
-       if (refcount_dec_and_test(&rreq->usage)) {
-               if (was_async) {
-                       rreq->work.func = netfs_free_read_request;
-                       if (!queue_work(system_unbound_wq, &rreq->work))
-                               BUG();
-               } else {
-                       netfs_free_read_request(&rreq->work);
-               }
-       }
-}
-
-/*
- * Allocate and partially initialise an I/O subrequest structure.
- */
-static struct netfs_read_subrequest *netfs_alloc_subrequest(
-       struct netfs_read_request *rreq)
-{
-       struct netfs_read_subrequest *subreq;
-
-       subreq = kzalloc(sizeof(struct netfs_read_subrequest), GFP_KERNEL);
-       if (subreq) {
-               INIT_LIST_HEAD(&subreq->rreq_link);
-               refcount_set(&subreq->usage, 2);
-               subreq->rreq = rreq;
-               netfs_get_read_request(rreq);
-               netfs_stat(&netfs_n_rh_sreq);
-       }
-
-       return subreq;
-}
-
-static void netfs_get_read_subrequest(struct netfs_read_subrequest *subreq)
-{
-       refcount_inc(&subreq->usage);
-}
-
-static void __netfs_put_subrequest(struct netfs_read_subrequest *subreq,
-                                  bool was_async)
-{
-       struct netfs_read_request *rreq = subreq->rreq;
-
-       trace_netfs_sreq(subreq, netfs_sreq_trace_free);
-       kfree(subreq);
-       netfs_stat_d(&netfs_n_rh_sreq);
-       netfs_put_read_request(rreq, was_async);
-}
-
-/*
- * Clear the unread part of an I/O request.
- */
-static void netfs_clear_unread(struct netfs_read_subrequest *subreq)
-{
-       struct iov_iter iter;
-
-       iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages,
-                       subreq->start + subreq->transferred,
-                       subreq->len   - subreq->transferred);
-       iov_iter_zero(iov_iter_count(&iter), &iter);
-}
-
-static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
-                                       bool was_async)
-{
-       struct netfs_read_subrequest *subreq = priv;
-
-       netfs_subreq_terminated(subreq, transferred_or_error, was_async);
-}
-
-/*
- * Issue a read against the cache.
- * - Eats the caller's ref on subreq.
- */
-static void netfs_read_from_cache(struct netfs_read_request *rreq,
-                                 struct netfs_read_subrequest *subreq,
-                                 enum netfs_read_from_hole read_hole)
-{
-       struct netfs_cache_resources *cres = &rreq->cache_resources;
-       struct iov_iter iter;
-
-       netfs_stat(&netfs_n_rh_read);
-       iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
-                       subreq->start + subreq->transferred,
-                       subreq->len   - subreq->transferred);
-
-       cres->ops->read(cres, subreq->start, &iter, read_hole,
-                       netfs_cache_read_terminated, subreq);
-}
-
-/*
- * Fill a subrequest region with zeroes.
- */
-static void netfs_fill_with_zeroes(struct netfs_read_request *rreq,
-                                  struct netfs_read_subrequest *subreq)
-{
-       netfs_stat(&netfs_n_rh_zero);
-       __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
-       netfs_subreq_terminated(subreq, 0, false);
-}
-
-/*
- * Ask the netfs to issue a read request to the server for us.
- *
- * The netfs is expected to read from subreq->start + subreq->transferred to
- * subreq->start + subreq->len - 1.  It may not backtrack and write data into the
- * buffer prior to the transferred point as it might clobber dirty data
- * obtained from the cache.
- *
- * Alternatively, the netfs is allowed to indicate one of two things:
- *
- * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
- *   make progress.
- *
- * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
- *   cleared.
- */
-static void netfs_read_from_server(struct netfs_read_request *rreq,
-                                  struct netfs_read_subrequest *subreq)
-{
-       netfs_stat(&netfs_n_rh_download);
-       rreq->netfs_ops->issue_op(subreq);
-}
-
-/*
- * Release those waiting.
- */
-static void netfs_rreq_completed(struct netfs_read_request *rreq, bool was_async)
-{
-       trace_netfs_rreq(rreq, netfs_rreq_trace_done);
-       netfs_rreq_clear_subreqs(rreq, was_async);
-       netfs_put_read_request(rreq, was_async);
-}
-
-/*
- * Deal with the completion of writing the data to the cache.  We have to clear
- * the PG_fscache bits on the folios involved and release the caller's ref.
- *
- * May be called in softirq mode and we inherit a ref from the caller.
- */
-static void netfs_rreq_unmark_after_write(struct netfs_read_request *rreq,
-                                         bool was_async)
-{
-       struct netfs_read_subrequest *subreq;
-       struct folio *folio;
-       pgoff_t unlocked = 0;
-       bool have_unlocked = false;
-
-       rcu_read_lock();
-
-       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-               XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
-
-               xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
-                       /* We might have multiple writes from the same huge
-                        * folio, but we mustn't unlock a folio more than once.
-                        */
-                       if (have_unlocked && folio_index(folio) <= unlocked)
-                               continue;
-                       unlocked = folio_index(folio);
-                       folio_end_fscache(folio);
-                       have_unlocked = true;
-               }
-       }
-
-       rcu_read_unlock();
-       netfs_rreq_completed(rreq, was_async);
-}
-
-static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
-                                      bool was_async)
-{
-       struct netfs_read_subrequest *subreq = priv;
-       struct netfs_read_request *rreq = subreq->rreq;
-
-       if (IS_ERR_VALUE(transferred_or_error)) {
-               netfs_stat(&netfs_n_rh_write_failed);
-               trace_netfs_failure(rreq, subreq, transferred_or_error,
-                                   netfs_fail_copy_to_cache);
-       } else {
-               netfs_stat(&netfs_n_rh_write_done);
-       }
-
-       trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
-
-       /* If we decrement nr_wr_ops to 0, the ref belongs to us. */
-       if (atomic_dec_and_test(&rreq->nr_wr_ops))
-               netfs_rreq_unmark_after_write(rreq, was_async);
-
-       netfs_put_subrequest(subreq, was_async);
-}
-
-/*
- * Perform any outstanding writes to the cache.  We inherit a ref from the
- * caller.
- */
-static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq)
-{
-       struct netfs_cache_resources *cres = &rreq->cache_resources;
-       struct netfs_read_subrequest *subreq, *next, *p;
-       struct iov_iter iter;
-       int ret;
-
-       trace_netfs_rreq(rreq, netfs_rreq_trace_write);
-
-       /* We don't want terminating writes trying to wake us up whilst we're
-        * still going through the list.
-        */
-       atomic_inc(&rreq->nr_wr_ops);
-
-       list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
-               if (!test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) {
-                       list_del_init(&subreq->rreq_link);
-                       netfs_put_subrequest(subreq, false);
-               }
-       }
-
-       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-               /* Amalgamate adjacent writes */
-               while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
-                       next = list_next_entry(subreq, rreq_link);
-                       if (next->start != subreq->start + subreq->len)
-                               break;
-                       subreq->len += next->len;
-                       list_del_init(&next->rreq_link);
-                       netfs_put_subrequest(next, false);
-               }
-
-               ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
-                                              rreq->i_size, true);
-               if (ret < 0) {
-                       trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
-                       trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
-                       continue;
-               }
-
-               iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
-                               subreq->start, subreq->len);
-
-               atomic_inc(&rreq->nr_wr_ops);
-               netfs_stat(&netfs_n_rh_write);
-               netfs_get_read_subrequest(subreq);
-               trace_netfs_sreq(subreq, netfs_sreq_trace_write);
-               cres->ops->write(cres, subreq->start, &iter,
-                                netfs_rreq_copy_terminated, subreq);
-       }
-
-       /* If we decrement nr_wr_ops to 0, the usage ref belongs to us. */
-       if (atomic_dec_and_test(&rreq->nr_wr_ops))
-               netfs_rreq_unmark_after_write(rreq, false);
-}
-
-static void netfs_rreq_write_to_cache_work(struct work_struct *work)
-{
-       struct netfs_read_request *rreq =
-               container_of(work, struct netfs_read_request, work);
-
-       netfs_rreq_do_write_to_cache(rreq);
-}
-
-static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq)
-{
-       rreq->work.func = netfs_rreq_write_to_cache_work;
-       if (!queue_work(system_unbound_wq, &rreq->work))
-               BUG();
-}
-
-/*
- * Unlock the folios in a read operation.  We need to set PG_fscache on any
- * folios we're going to write back before we unlock them.
- */
-static void netfs_rreq_unlock(struct netfs_read_request *rreq)
-{
-       struct netfs_read_subrequest *subreq;
-       struct folio *folio;
-       unsigned int iopos, account = 0;
-       pgoff_t start_page = rreq->start / PAGE_SIZE;
-       pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
-       bool subreq_failed = false;
-
-       XA_STATE(xas, &rreq->mapping->i_pages, start_page);
-
-       if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
-               __clear_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
-               list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-                       __clear_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
-               }
-       }
-
-       /* Walk through the pagecache and the I/O request lists simultaneously.
-        * We may have a mixture of cached and uncached sections and we only
-        * really want to write out the uncached sections.  This is slightly
-        * complicated by the possibility that we might have huge pages with a
-        * mixture inside.
-        */
-       subreq = list_first_entry(&rreq->subrequests,
-                                 struct netfs_read_subrequest, rreq_link);
-       iopos = 0;
-       subreq_failed = (subreq->error < 0);
-
-       trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);
-
-       rcu_read_lock();
-       xas_for_each(&xas, folio, last_page) {
-               unsigned int pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
-               unsigned int pgend = pgpos + folio_size(folio);
-               bool pg_failed = false;
-
-               for (;;) {
-                       if (!subreq) {
-                               pg_failed = true;
-                               break;
-                       }
-                       if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
-                               folio_start_fscache(folio);
-                       pg_failed |= subreq_failed;
-                       if (pgend < iopos + subreq->len)
-                               break;
-
-                       account += subreq->transferred;
-                       iopos += subreq->len;
-                       if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
-                               subreq = list_next_entry(subreq, rreq_link);
-                               subreq_failed = (subreq->error < 0);
-                       } else {
-                               subreq = NULL;
-                               subreq_failed = false;
-                       }
-                       if (pgend == iopos)
-                               break;
-               }
-
-               if (!pg_failed) {
-                       flush_dcache_folio(folio);
-                       folio_mark_uptodate(folio);
-               }
-
-               if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
-                       if (folio_index(folio) == rreq->no_unlock_folio &&
-                           test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
-                               _debug("no unlock");
-                       else
-                               folio_unlock(folio);
-               }
-       }
-       rcu_read_unlock();
-
-       task_io_account_read(account);
-       if (rreq->netfs_ops->done)
-               rreq->netfs_ops->done(rreq);
-}
-
-/*
- * Handle a short read.
- */
-static void netfs_rreq_short_read(struct netfs_read_request *rreq,
-                                 struct netfs_read_subrequest *subreq)
-{
-       __clear_bit(NETFS_SREQ_SHORT_READ, &subreq->flags);
-       __set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);
-
-       netfs_stat(&netfs_n_rh_short_read);
-       trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);
-
-       netfs_get_read_subrequest(subreq);
-       atomic_inc(&rreq->nr_rd_ops);
-       if (subreq->source == NETFS_READ_FROM_CACHE)
-               netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
-       else
-               netfs_read_from_server(rreq, subreq);
-}
-
-/*
- * Resubmit any short or failed operations.  Returns true if we got the rreq
- * ref back.
- */
-static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq)
-{
-       struct netfs_read_subrequest *subreq;
-
-       WARN_ON(in_interrupt());
-
-       trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
-
-       /* We don't want terminating submissions trying to wake us up whilst
-        * we're still going through the list.
-        */
-       atomic_inc(&rreq->nr_rd_ops);
-
-       __clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
-       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-               if (subreq->error) {
-                       if (subreq->source != NETFS_READ_FROM_CACHE)
-                               break;
-                       subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
-                       subreq->error = 0;
-                       netfs_stat(&netfs_n_rh_download_instead);
-                       trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
-                       netfs_get_read_subrequest(subreq);
-                       atomic_inc(&rreq->nr_rd_ops);
-                       netfs_read_from_server(rreq, subreq);
-               } else if (test_bit(NETFS_SREQ_SHORT_READ, &subreq->flags)) {
-                       netfs_rreq_short_read(rreq, subreq);
-               }
-       }
-
-       /* If we decrement nr_rd_ops to 0, the usage ref belongs to us. */
-       if (atomic_dec_and_test(&rreq->nr_rd_ops))
-               return true;
-
-       wake_up_var(&rreq->nr_rd_ops);
-       return false;
-}
-
-/*
- * Check to see if the data read is still valid.
- */
-static void netfs_rreq_is_still_valid(struct netfs_read_request *rreq)
-{
-       struct netfs_read_subrequest *subreq;
-
-       if (!rreq->netfs_ops->is_still_valid ||
-           rreq->netfs_ops->is_still_valid(rreq))
-               return;
-
-       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-               if (subreq->source == NETFS_READ_FROM_CACHE) {
-                       subreq->error = -ESTALE;
-                       __set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
-               }
-       }
-}
-
-/*
- * Assess the state of a read request and decide what to do next.
- *
- * Note that we could be in an ordinary kernel thread, on a workqueue or in
- * softirq context at this point.  We inherit a ref from the caller.
- */
-static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async)
-{
-       trace_netfs_rreq(rreq, netfs_rreq_trace_assess);
-
-again:
-       netfs_rreq_is_still_valid(rreq);
-
-       if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
-           test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
-               if (netfs_rreq_perform_resubmissions(rreq))
-                       goto again;
-               return;
-       }
-
-       netfs_rreq_unlock(rreq);
-
-       clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
-       wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
-
-       if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
-               return netfs_rreq_write_to_cache(rreq);
-
-       netfs_rreq_completed(rreq, was_async);
-}
-
-static void netfs_rreq_work(struct work_struct *work)
-{
-       struct netfs_read_request *rreq =
-               container_of(work, struct netfs_read_request, work);
-       netfs_rreq_assess(rreq, false);
-}
-
-/*
- * Handle the completion of all outstanding I/O operations on a read request.
- * We inherit a ref from the caller.
- */
-static void netfs_rreq_terminated(struct netfs_read_request *rreq,
-                                 bool was_async)
-{
-       if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
-           was_async) {
-               if (!queue_work(system_unbound_wq, &rreq->work))
-                       BUG();
-       } else {
-               netfs_rreq_assess(rreq, was_async);
-       }
-}
-
-/**
- * netfs_subreq_terminated - Note the termination of an I/O operation.
- * @subreq: The I/O request that has terminated.
- * @transferred_or_error: The amount of data transferred or an error code.
- * @was_async: The termination was asynchronous
- *
- * This tells the read helper that a contributory I/O operation has terminated,
- * one way or another, and that it should integrate the results.
- *
- * The caller indicates in @transferred_or_error the outcome of the operation,
- * supplying a positive value to indicate the number of bytes transferred, 0 to
- * indicate a failure to transfer anything that should be retried or a negative
- * error code.  The helper will look after reissuing I/O operations as
- * appropriate and writing downloaded data to the cache.
- *
- * If @was_async is true, the caller might be running in softirq or interrupt
- * context and we can't sleep.
- */
-void netfs_subreq_terminated(struct netfs_read_subrequest *subreq,
-                            ssize_t transferred_or_error,
-                            bool was_async)
-{
-       struct netfs_read_request *rreq = subreq->rreq;
-       int u;
-
-       _enter("[%u]{%llx,%lx},%zd",
-              subreq->debug_index, subreq->start, subreq->flags,
-              transferred_or_error);
-
-       switch (subreq->source) {
-       case NETFS_READ_FROM_CACHE:
-               netfs_stat(&netfs_n_rh_read_done);
-               break;
-       case NETFS_DOWNLOAD_FROM_SERVER:
-               netfs_stat(&netfs_n_rh_download_done);
-               break;
-       default:
-               break;
-       }
-
-       if (IS_ERR_VALUE(transferred_or_error)) {
-               subreq->error = transferred_or_error;
-               trace_netfs_failure(rreq, subreq, transferred_or_error,
-                                   netfs_fail_read);
-               goto failed;
-       }
-
-       if (WARN(transferred_or_error > subreq->len - subreq->transferred,
-                "Subreq overread: R%x[%x] %zd > %zu - %zu",
-                rreq->debug_id, subreq->debug_index,
-                transferred_or_error, subreq->len, subreq->transferred))
-               transferred_or_error = subreq->len - subreq->transferred;
-
-       subreq->error = 0;
-       subreq->transferred += transferred_or_error;
-       if (subreq->transferred < subreq->len)
-               goto incomplete;
-
-complete:
-       __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
-       if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
-               set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
-
-out:
-       trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
-
-       /* If we decrement nr_rd_ops to 0, the ref belongs to us. */
-       u = atomic_dec_return(&rreq->nr_rd_ops);
-       if (u == 0)
-               netfs_rreq_terminated(rreq, was_async);
-       else if (u == 1)
-               wake_up_var(&rreq->nr_rd_ops);
-
-       netfs_put_subrequest(subreq, was_async);
-       return;
-
-incomplete:
-       if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
-               netfs_clear_unread(subreq);
-               subreq->transferred = subreq->len;
-               goto complete;
-       }
-
-       if (transferred_or_error == 0) {
-               if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
-                       subreq->error = -ENODATA;
-                       goto failed;
-               }
-       } else {
-               __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
-       }
-
-       __set_bit(NETFS_SREQ_SHORT_READ, &subreq->flags);
-       set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
-       goto out;
-
-failed:
-       if (subreq->source == NETFS_READ_FROM_CACHE) {
-               netfs_stat(&netfs_n_rh_read_failed);
-               set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
-       } else {
-               netfs_stat(&netfs_n_rh_download_failed);
-               set_bit(NETFS_RREQ_FAILED, &rreq->flags);
-               rreq->error = subreq->error;
-       }
-       goto out;
-}
-EXPORT_SYMBOL(netfs_subreq_terminated);
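
The kernel-doc above is the whole completion contract for a filesystem's transport: a positive count for bytes transferred, 0 for a retryable failure to make progress, or a negative errno, after which the helper reissues I/O and writes downloaded data to the cache as needed. A hedged sketch of an issue_op honouring that contract; myfs_fetch_data() is a hypothetical transport call that fills the pagecache over the given range and returns bytes read or -errno.

static void myfs_issue_op(struct netfs_read_subrequest *subreq)
{
        ssize_t ret;

        /* Read only the part not yet transferred, per the rules above. */
        ret = myfs_fetch_data(subreq->rreq->inode,
                              subreq->start + subreq->transferred,
                              subreq->len - subreq->transferred);

        netfs_subreq_terminated(subreq, ret, false);
}
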
-
-static enum netfs_read_source netfs_cache_prepare_read(struct netfs_read_subrequest *subreq,
-                                                      loff_t i_size)
-{
-       struct netfs_read_request *rreq = subreq->rreq;
-       struct netfs_cache_resources *cres = &rreq->cache_resources;
-
-       if (cres->ops)
-               return cres->ops->prepare_read(subreq, i_size);
-       if (subreq->start >= rreq->i_size)
-               return NETFS_FILL_WITH_ZEROES;
-       return NETFS_DOWNLOAD_FROM_SERVER;
-}
-
-/*
- * Work out what sort of subrequest the next one will be.
- */
-static enum netfs_read_source
-netfs_rreq_prepare_read(struct netfs_read_request *rreq,
-                       struct netfs_read_subrequest *subreq)
-{
-       enum netfs_read_source source;
-
-       _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
-
-       source = netfs_cache_prepare_read(subreq, rreq->i_size);
-       if (source == NETFS_INVALID_READ)
-               goto out;
-
-       if (source == NETFS_DOWNLOAD_FROM_SERVER) {
-               /* Call out to the netfs to let it shrink the request to fit
-                * its own I/O sizes and boundaries.  If it shrinks it here, it
-                * will be called again to make simultaneous calls; if it wants
-                * to make serial calls, it can indicate a short read and then
-                * we will call it again.
-                */
-               if (subreq->len > rreq->i_size - subreq->start)
-                       subreq->len = rreq->i_size - subreq->start;
-
-               if (rreq->netfs_ops->clamp_length &&
-                   !rreq->netfs_ops->clamp_length(subreq)) {
-                       source = NETFS_INVALID_READ;
-                       goto out;
-               }
-       }
-
-       if (WARN_ON(subreq->len == 0))
-               source = NETFS_INVALID_READ;
-
-out:
-       subreq->source = source;
-       trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
-       return source;
-}
-
-/*
- * Slice off a piece of a read request and submit an I/O request for it.
- */
-static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq,
-                                   unsigned int *_debug_index)
-{
-       struct netfs_read_subrequest *subreq;
-       enum netfs_read_source source;
-
-       subreq = netfs_alloc_subrequest(rreq);
-       if (!subreq)
-               return false;
-
-       subreq->debug_index     = (*_debug_index)++;
-       subreq->start           = rreq->start + rreq->submitted;
-       subreq->len             = rreq->len   - rreq->submitted;
-
-       _debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
-       list_add_tail(&subreq->rreq_link, &rreq->subrequests);
-
-       /* Call out to the cache to find out what it can do with the remaining
-        * subset.  It tells us in subreq->flags what it decided should be done
-        * and adjusts subreq->len down if the subset crosses a cache boundary.
-        *
-        * Then when we hand the subset, it can choose to take a subset of that
-        * (the starts must coincide), in which case, we go around the loop
-        * again and ask it to download the next piece.
-        */
-       source = netfs_rreq_prepare_read(rreq, subreq);
-       if (source == NETFS_INVALID_READ)
-               goto subreq_failed;
-
-       atomic_inc(&rreq->nr_rd_ops);
-
-       rreq->submitted += subreq->len;
-
-       trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
-       switch (source) {
-       case NETFS_FILL_WITH_ZEROES:
-               netfs_fill_with_zeroes(rreq, subreq);
-               break;
-       case NETFS_DOWNLOAD_FROM_SERVER:
-               netfs_read_from_server(rreq, subreq);
-               break;
-       case NETFS_READ_FROM_CACHE:
-               netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
-               break;
-       default:
-               BUG();
-       }
-
-       return true;
-
-subreq_failed:
-       rreq->error = subreq->error;
-       netfs_put_subrequest(subreq, false);
-       return false;
-}
-
-static void netfs_cache_expand_readahead(struct netfs_read_request *rreq,
-                                        loff_t *_start, size_t *_len, loff_t i_size)
-{
-       struct netfs_cache_resources *cres = &rreq->cache_resources;
-
-       if (cres->ops && cres->ops->expand_readahead)
-               cres->ops->expand_readahead(cres, _start, _len, i_size);
-}
-
-static void netfs_rreq_expand(struct netfs_read_request *rreq,
-                             struct readahead_control *ractl)
-{
-       /* Give the cache a chance to change the request parameters.  The
-        * resultant request must contain the original region.
-        */
-       netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
-
-       /* Give the netfs a chance to change the request parameters.  The
-        * resultant request must contain the original region.
-        */
-       if (rreq->netfs_ops->expand_readahead)
-               rreq->netfs_ops->expand_readahead(rreq);
-
-       /* Expand the request if the cache wants it to start earlier.  Note
-        * that the expansion may get further extended if the VM wishes to
-        * insert THPs and the preferred start and/or end wind up in the middle
-        * of THPs.
-        *
-        * If this is the case, however, the THP size should be an integer
-        * multiple of the cache granule size, so we get a whole number of
-        * granules to deal with.
-        */
-       if (rreq->start  != readahead_pos(ractl) ||
-           rreq->len != readahead_length(ractl)) {
-               readahead_expand(ractl, rreq->start, rreq->len);
-               rreq->start  = readahead_pos(ractl);
-               rreq->len = readahead_length(ractl);
-
-               trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
-                                netfs_read_trace_expanded);
-       }
-}
-
-/**
- * netfs_readahead - Helper to manage a read request
- * @ractl: The description of the readahead request
- * @ops: The network filesystem's operations for the helper to use
- * @netfs_priv: Private netfs data to be retained in the request
- *
- * Fulfil a readahead request by drawing data from the cache if possible, or
- * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
- * requests from different sources will get munged together.  If necessary, the
- * readahead window can be expanded in either direction to a more convenient
- * alignment for RPC efficiency or to make storage in the cache feasible.
- *
- * The calling netfs must provide a table of operations, only one of which,
- * issue_op, is mandatory.  It may also be passed a private token, which will
- * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
- *
- * This is usable whether or not caching is enabled.
- */
-void netfs_readahead(struct readahead_control *ractl,
-                    const struct netfs_read_request_ops *ops,
-                    void *netfs_priv)
-{
-       struct netfs_read_request *rreq;
-       unsigned int debug_index = 0;
-       int ret;
-
-       _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
-
-       if (readahead_count(ractl) == 0)
-               goto cleanup;
-
-       rreq = netfs_alloc_read_request(ops, netfs_priv, ractl->file);
-       if (!rreq)
-               goto cleanup;
-       rreq->mapping   = ractl->mapping;
-       rreq->start     = readahead_pos(ractl);
-       rreq->len       = readahead_length(ractl);
-
-       if (ops->begin_cache_operation) {
-               ret = ops->begin_cache_operation(rreq);
-               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
-                       goto cleanup_free;
-       }
-
-       netfs_stat(&netfs_n_rh_readahead);
-       trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
-                        netfs_read_trace_readahead);
-
-       netfs_rreq_expand(rreq, ractl);
-
-       atomic_set(&rreq->nr_rd_ops, 1);
-       do {
-               if (!netfs_rreq_submit_slice(rreq, &debug_index))
-                       break;
-
-       } while (rreq->submitted < rreq->len);
-
-       /* Drop the refs on the folios here rather than in the cache or
-        * filesystem.  The locks will be dropped in netfs_rreq_unlock().
-        */
-       while (readahead_folio(ractl))
-               ;
-
-       /* If we decrement nr_rd_ops to 0, the ref belongs to us. */
-       if (atomic_dec_and_test(&rreq->nr_rd_ops))
-               netfs_rreq_assess(rreq, false);
-       return;
-
-cleanup_free:
-       netfs_put_read_request(rreq, false);
-       return;
-cleanup:
-       if (netfs_priv)
-               ops->cleanup(ractl->mapping, netfs_priv);
-       return;
-}
-EXPORT_SYMBOL(netfs_readahead);
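
For context, a filesystem consumed this helper by delegating its readahead address_space operation to it. A hedged sketch, assuming a hypothetical ops table myfs_req_ops that sets at least .issue_op, the one mandatory method named in the kernel-doc:

static const struct netfs_read_request_ops myfs_req_ops = {
        .issue_op = myfs_issue_op,
};

static void myfs_readahead(struct readahead_control *ractl)
{
        netfs_readahead(ractl, &myfs_req_ops, NULL);
}
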
-
-/**
- * netfs_readpage - Helper to manage a readpage request
- * @file: The file to read from
- * @folio: The folio to read
- * @ops: The network filesystem's operations for the helper to use
- * @netfs_priv: Private netfs data to be retained in the request
- *
- * Fulfil a readpage request by drawing data from the cache if possible, or the
- * netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O requests
- * from different sources will get munged together.
- *
- * The calling netfs must provide a table of operations, only one of which,
- * issue_op, is mandatory.  It may also be passed a private token, which will
- * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
- *
- * This is usable whether or not caching is enabled.
- */
-int netfs_readpage(struct file *file,
-                  struct folio *folio,
-                  const struct netfs_read_request_ops *ops,
-                  void *netfs_priv)
-{
-       struct netfs_read_request *rreq;
-       unsigned int debug_index = 0;
-       int ret;
-
-       _enter("%lx", folio_index(folio));
-
-       rreq = netfs_alloc_read_request(ops, netfs_priv, file);
-       if (!rreq) {
-               if (netfs_priv)
-                       ops->cleanup(folio_file_mapping(folio), netfs_priv);
-               folio_unlock(folio);
-               return -ENOMEM;
-       }
-       rreq->mapping   = folio_file_mapping(folio);
-       rreq->start     = folio_file_pos(folio);
-       rreq->len       = folio_size(folio);
-
-       if (ops->begin_cache_operation) {
-               ret = ops->begin_cache_operation(rreq);
-               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
-                       folio_unlock(folio);
-                       goto out;
-               }
-       }
-
-       netfs_stat(&netfs_n_rh_readpage);
-       trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
-
-       netfs_get_read_request(rreq);
-
-       atomic_set(&rreq->nr_rd_ops, 1);
-       do {
-               if (!netfs_rreq_submit_slice(rreq, &debug_index))
-                       break;
-
-       } while (rreq->submitted < rreq->len);
-
-       /* Keep nr_rd_ops incremented so that the ref always belongs to us, and
-        * the service code isn't punted off to a random thread pool to
-        * process.
-        */
-       do {
-               wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
-               netfs_rreq_assess(rreq, false);
-       } while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags));
-
-       ret = rreq->error;
-       if (ret == 0 && rreq->submitted < rreq->len) {
-               trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_readpage);
-               ret = -EIO;
-       }
-out:
-       netfs_put_read_request(rreq, false);
-       return ret;
-}
-EXPORT_SYMBOL(netfs_readpage);
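
The readpage path is similar; the ->readpage aop in this kernel is still page-based, so a sketch (same hypothetical myfs_req_ops) converts with page_folio() before handing off:

static int myfs_readpage(struct file *file, struct page *page)
{
        return netfs_readpage(file, page_folio(page), &myfs_req_ops, NULL);
}
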
-
-/*
- * Prepare a folio for writing without reading first
- * @folio: The folio being prepared
- * @pos: starting position for the write
- * @len: length of write
- *
- * In some cases, write_begin doesn't need to read at all:
- * - full folio write
- * - write that lies in a folio that is completely beyond EOF
- * - write that covers the folio from start to EOF or beyond it
- *
- * If any of these criteria are met, then zero out the unwritten parts
- * of the folio and return true. Otherwise, return false.
- */
-static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len)
-{
-       struct inode *inode = folio_inode(folio);
-       loff_t i_size = i_size_read(inode);
-       size_t offset = offset_in_folio(folio, pos);
-
-       /* Full folio write */
-       if (offset == 0 && len >= folio_size(folio))
-               return true;
-
-       /* pos beyond last folio in the file */
-       if (pos - offset >= i_size)
-               goto zero_out;
-
-       /* Write that covers from the start of the folio to EOF or beyond */
-       if (offset == 0 && (pos + len) >= i_size)
-               goto zero_out;
-
-       return false;
-zero_out:
-       zero_user_segments(&folio->page, 0, offset, offset + len, folio_size(folio));
-       return true;
-}
-
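Concrete instances of the three cases above, assuming a 4 KiB folio and i_size = 2048: a folio at position 0 written with pos = 0, len = 4096 is a full-folio write and returns true with no zeroing; a folio at position 4096 lies wholly beyond EOF, so any write into it zeroes the unwritten parts and returns true; and pos = 0, len = 3000 covers the first folio from its start to beyond EOF, so the tail is zeroed and true is returned. A mid-folio write such as pos = 512, len = 100 returns false and forces the read.
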
-/**
- * netfs_write_begin - Helper to prepare for writing
- * @file: The file to read from
- * @mapping: The mapping to read from
- * @pos: File position at which the write will begin
- * @len: The length of the write (may extend beyond the end of the folio chosen)
- * @aop_flags: AOP_* flags
- * @_folio: Where to put the resultant folio
- * @_fsdata: Place for the netfs to store a cookie
- * @ops: The network filesystem's operations for the helper to use
- * @netfs_priv: Private netfs data to be retained in the request
- *
- * Pre-read data for a write-begin request by drawing data from the cache if
- * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
- * Multiple I/O requests from different sources will get munged together.  If
- * necessary, the readahead window can be expanded in either direction to a
- * more convenient alignment for RPC efficiency or to make storage in the cache
- * feasible.
- *
- * The calling netfs must provide a table of operations, only one of which,
- * issue_op, is mandatory.
- *
- * The check_write_begin() operation can be provided to check for and flush
- * conflicting writes once the folio is grabbed and locked.  It is passed a
- * pointer to the fsdata cookie that gets returned to the VM to be passed to
- * write_end.  It is permitted to sleep.  It should return 0 if the request
- * should go ahead; unlock the folio and return -EAGAIN to cause the folio to
- * be regot; or return an error.
- *
- * This is usable whether or not caching is enabled.
- */
-int netfs_write_begin(struct file *file, struct address_space *mapping,
-                     loff_t pos, unsigned int len, unsigned int aop_flags,
-                     struct folio **_folio, void **_fsdata,
-                     const struct netfs_read_request_ops *ops,
-                     void *netfs_priv)
-{
-       struct netfs_read_request *rreq;
-       struct folio *folio;
-       struct inode *inode = file_inode(file);
-       unsigned int debug_index = 0, fgp_flags;
-       pgoff_t index = pos >> PAGE_SHIFT;
-       int ret;
-
-       DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
-
-retry:
-       fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
-       if (aop_flags & AOP_FLAG_NOFS)
-               fgp_flags |= FGP_NOFS;
-       folio = __filemap_get_folio(mapping, index, fgp_flags,
-                                   mapping_gfp_mask(mapping));
-       if (!folio)
-               return -ENOMEM;
-
-       if (ops->check_write_begin) {
-               /* Allow the netfs (eg. ceph) to flush conflicts. */
-               ret = ops->check_write_begin(file, pos, len, folio, _fsdata);
-               if (ret < 0) {
-                       trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
-                       if (ret == -EAGAIN)
-                               goto retry;
-                       goto error;
-               }
-       }
-
-       if (folio_test_uptodate(folio))
-               goto have_folio;
-
-       /* If the page is beyond the EOF, we want to clear it - unless it's
-        * within the cache granule containing the EOF, in which case we need
-        * to preload the granule.
-        */
-       if (!ops->is_cache_enabled(inode) &&
-           netfs_skip_folio_read(folio, pos, len)) {
-               netfs_stat(&netfs_n_rh_write_zskip);
-               goto have_folio_no_wait;
-       }
-
-       ret = -ENOMEM;
-       rreq = netfs_alloc_read_request(ops, netfs_priv, file);
-       if (!rreq)
-               goto error;
-       rreq->mapping           = folio_file_mapping(folio);
-       rreq->start             = folio_file_pos(folio);
-       rreq->len               = folio_size(folio);
-       rreq->no_unlock_folio   = folio_index(folio);
-       __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
-       netfs_priv = NULL;
-
-       if (ops->begin_cache_operation) {
-               ret = ops->begin_cache_operation(rreq);
-               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
-                       goto error_put;
-       }
-
-       netfs_stat(&netfs_n_rh_write_begin);
-       trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
-
-       /* Expand the request to meet caching requirements and download
-        * preferences.
-        */
-       ractl._nr_pages = folio_nr_pages(folio);
-       netfs_rreq_expand(rreq, &ractl);
-       netfs_get_read_request(rreq);
-
-       /* We hold the folio locks, so we can drop the references */
-       folio_get(folio);
-       while (readahead_folio(&ractl))
-               ;
-
-       atomic_set(&rreq->nr_rd_ops, 1);
-       do {
-               if (!netfs_rreq_submit_slice(rreq, &debug_index))
-                       break;
-
-       } while (rreq->submitted < rreq->len);
-
-       /* Keep nr_rd_ops incremented so that the ref always belongs to us, and
-        * the service code isn't punted off to a random thread pool to
-        * process.
-        */
-       for (;;) {
-               wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
-               netfs_rreq_assess(rreq, false);
-               if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
-                       break;
-               cond_resched();
-       }
-
-       ret = rreq->error;
-       if (ret == 0 && rreq->submitted < rreq->len) {
-               trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin);
-               ret = -EIO;
-       }
-       netfs_put_read_request(rreq, false);
-       if (ret < 0)
-               goto error;
-
-have_folio:
-       ret = folio_wait_fscache_killable(folio);
-       if (ret < 0)
-               goto error;
-have_folio_no_wait:
-       if (netfs_priv)
-               ops->cleanup(mapping, netfs_priv);
-       *_folio = folio;
-       _leave(" = 0");
-       return 0;
-
-error_put:
-       netfs_put_read_request(rreq, false);
-error:
-       folio_unlock(folio);
-       folio_put(folio);
-       if (netfs_priv)
-               ops->cleanup(mapping, netfs_priv);
-       _leave(" = %d", ret);
-       return ret;
-}
-EXPORT_SYMBOL(netfs_write_begin);
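
And a sketch for the write path, delegating a page-based ->write_begin to the helper and unwrapping the folio for the VM; myfs_req_ops is the same hypothetical table as in the earlier sketches:

static int myfs_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned int len, unsigned int flags,
                            struct page **pagep, void **fsdata)
{
        struct folio *folio;
        int ret;

        ret = netfs_write_begin(file, mapping, pos, len, flags,
                                &folio, fsdata, &myfs_req_ops, NULL);
        if (ret == 0)
                *pagep = &folio->page;
        return ret;
}
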
index 9ae538c853787ee4284bbfaab6e9328885a2f564..5510a7a14a40dda1a53d344399852d001252aaa0 100644 (file)
@@ -7,7 +7,6 @@
 
 #include <linux/export.h>
 #include <linux/seq_file.h>
-#include <linux/netfs.h>
 #include "internal.h"
 
 atomic_t netfs_n_rh_readahead;
index 47a53b3362b628001e7d7fa6f21d15f748b01a09..14a72224b6571b9617d586f766e39dcb5f1772eb 100644 (file)
@@ -4,10 +4,6 @@ config NFS_FS
        depends on INET && FILE_LOCKING && MULTIUSER
        select LOCKD
        select SUNRPC
-       select CRYPTO
-       select CRYPTO_HASH
-       select XXHASH
-       select CRYPTO_XXHASH
        select NFS_ACL_SUPPORT if NFS_V3_ACL
        help
          Choose Y here if you want to access files residing on other
index bac4cf1a308efe8c9ef55b582d8fef9ace5d9430..c6b263b5faf1fccb3ac1fe22ac2fb469ae7a740a 100644 (file)
@@ -39,7 +39,7 @@
 #include <linux/sched.h>
 #include <linux/kmemleak.h>
 #include <linux/xattr.h>
-#include <linux/xxhash.h>
+#include <linux/hash.h>
 
 #include "delegation.h"
 #include "iostat.h"
@@ -350,10 +350,7 @@ out:
  * of directory cookies. Content is addressed by the value of the
  * cookie index of the first readdir entry in a page.
  *
- * The xxhash algorithm is chosen because it is fast, and is supposed
- * to result in a decent flat distribution of hashes.
- *
- * We then select only the first 18 bits to avoid issues with excessive
+ * We select only the first 18 bits to avoid issues with excessive
  * memory use for the page cache XArray. 18 bits should allow the caching
  * of 262144 pages of sequences of readdir entries. Since each page holds
  * 127 readdir entries for a typical 64-bit system, that works out to a
@@ -363,7 +360,7 @@ static pgoff_t nfs_readdir_page_cookie_hash(u64 cookie)
 {
        if (cookie == 0)
                return 0;
-       return xxhash(&cookie, sizeof(cookie), 0) & NFS_READDIR_COOKIE_MASK;
+       return hash_64(cookie, 18);
 }
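
The replacement relies on hash_64() from <linux/hash.h>, which multiplies the cookie by a 64-bit golden-ratio constant and keeps the top 18 bits, so the result is always below 2^18 = 262144 and the explicit NFS_READDIR_COOKIE_MASK the xxhash version needed becomes unnecessary.
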
 
 static bool nfs_readdir_page_validate(struct page *page, u64 last_cookie,
@@ -1991,16 +1988,6 @@ const struct dentry_operations nfs4_dentry_operations = {
 };
 EXPORT_SYMBOL_GPL(nfs4_dentry_operations);
 
-static fmode_t flags_to_mode(int flags)
-{
-       fmode_t res = (__force fmode_t)flags & FMODE_EXEC;
-       if ((flags & O_ACCMODE) != O_WRONLY)
-               res |= FMODE_READ;
-       if ((flags & O_ACCMODE) != O_RDONLY)
-               res |= FMODE_WRITE;
-       return res;
-}
-
 static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags, struct file *filp)
 {
        return alloc_nfs_open_context(dentry, flags_to_mode(open_flags), filp);
index b0ca244c50d0be6aa0d4993311ef013942880001..150b7fa8f0a73637c7291a174172e1b2ffa8c3c4 100644 (file)
@@ -646,7 +646,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
        result = generic_write_checks(iocb, from);
        if (result > 0) {
                current->backing_dev_info = inode_to_bdi(inode);
-               result = generic_perform_write(file, from, iocb->ki_pos);
+               result = generic_perform_write(iocb, from);
                current->backing_dev_info = NULL;
        }
        nfs_end_io_write(inode);
index 4dee53ceb94168915a5b0b2b417612546efb542b..f73c09a9cf0a9e35a22cd45cfb9b7ccf2d2395a8 100644 (file)
@@ -238,14 +238,6 @@ void nfs_fscache_release_file(struct inode *inode, struct file *filp)
        }
 }
 
-static inline void fscache_end_operation(struct netfs_cache_resources *cres)
-{
-       const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
-
-       if (ops)
-               ops->end_operation(cres);
-}
-
 /*
  * Fallback page reading interface.
  */
index 7eb3b08d702f89311ea44efda0125c331fc2f500..b4e46b0ffa2dc04d268827312286af190fef1ab3 100644 (file)
@@ -1180,7 +1180,6 @@ int nfs_open(struct inode *inode, struct file *filp)
        nfs_fscache_open_file(inode, filp);
        return 0;
 }
-EXPORT_SYMBOL_GPL(nfs_open);
 
 /*
  * This function is called whenever some part of NFS notices that
index 57b0497105c80306c1ddc4652c21d319326234e8..7eefa16ed381bf6f32689ce884d069ba5406d5bd 100644 (file)
@@ -42,6 +42,16 @@ static inline bool nfs_lookup_is_soft_revalidate(const struct dentry *dentry)
        return true;
 }
 
+static inline fmode_t flags_to_mode(int flags)
+{
+       fmode_t res = (__force fmode_t)flags & FMODE_EXEC;
+       if ((flags & O_ACCMODE) != O_WRONLY)
+               res |= FMODE_READ;
+       if ((flags & O_ACCMODE) != O_RDONLY)
+               res |= FMODE_WRITE;
+       return res;
+}
+
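Worked examples of the mapping now that the helper is shared via internal.h: O_RDONLY yields FMODE_READ, O_WRONLY yields FMODE_WRITE, O_RDWR yields FMODE_READ | FMODE_WRITE, and the nonstandard (flags & O_ACCMODE) == 3 case (used by nfs4_file_open() below) also yields both, with any FMODE_EXEC bit carried through from the incoming flags.
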
 /*
  * Note: RFC 1813 doesn't limit the number of auth flavors that
  * a server can return, so make something up.
index ad3405c64b9e428815fba562792f37fcaa4a8025..e7b34f7e0614b6ff47836777c98fd21a07ba054d 100644 (file)
@@ -997,7 +997,7 @@ int __init nfs4_xattr_cache_init(void)
 
        nfs4_xattr_cache_cachep = kmem_cache_create("nfs4_xattr_cache_cache",
            sizeof(struct nfs4_xattr_cache), 0,
-           (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+           (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
            nfs4_xattr_cache_init_once);
        if (nfs4_xattr_cache_cachep == NULL)
                return -ENOMEM;
index d258933cf8c881ab9242f264cb3b7bc65e658047..7b861e4f0533ac0a83dfa1653e35f4ea36d7c108 100644 (file)
@@ -32,6 +32,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
        struct dentry *parent = NULL;
        struct inode *dir;
        unsigned openflags = filp->f_flags;
+       fmode_t f_mode;
        struct iattr attr;
        int err;
 
@@ -50,8 +51,9 @@ nfs4_file_open(struct inode *inode, struct file *filp)
        if (err)
                return err;
 
+       f_mode = filp->f_mode;
        if ((openflags & O_ACCMODE) == 3)
-               return nfs_open(inode, filp);
+               f_mode |= flags_to_mode(openflags);
 
        /* We can't create new files here */
        openflags &= ~(O_CREAT|O_EXCL);
@@ -59,7 +61,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
        parent = dget_parent(dentry);
        dir = d_inode(parent);
 
-       ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp);
+       ctx = alloc_nfs_open_context(file_dentry(filp), f_mode, filp);
        err = PTR_ERR(ctx);
        if (IS_ERR(ctx))
                goto out;
index e3f5b380cefe95e39ddf0151bdabc3004a27b544..16106f805ffa21a41f7833247c5f019924f4d537 100644 (file)
@@ -9615,6 +9615,8 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
        nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
 
        task = rpc_run_task(&task_setup_data);
+       if (IS_ERR(task))
+               return ERR_CAST(task);
 
        status = rpc_wait_for_completion_task(task);
        if (status != 0)
index 5fa11e1aca4c2759358aa37c7c36691c1bc4bcd4..6f325e10056cebecf767d4a988715c1d5b58737c 100644 (file)
@@ -347,6 +347,7 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (data == NULL)
                return ERR_PTR(-ENOMEM);
+       task_setup_data.task = &data->task;
        task_setup_data.callback_data = data;
 
        data->cred = get_current_cred();
index c08882f5867b2a3268cc0358f7e13704840df183..2c1b027774d41c84c6f8e334c7e4a9f6bfaefaf4 100644 (file)
@@ -236,6 +236,13 @@ nfsd_file_check_write_error(struct nfsd_file *nf)
        return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
 }
 
+static void
+nfsd_file_flush(struct nfsd_file *nf)
+{
+       if (nf->nf_file && vfs_fsync(nf->nf_file, 1) != 0)
+               nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
+}
+
 static void
 nfsd_file_do_unhash(struct nfsd_file *nf)
 {
@@ -295,19 +302,15 @@ nfsd_file_put_noref(struct nfsd_file *nf)
 void
 nfsd_file_put(struct nfsd_file *nf)
 {
-       bool is_hashed;
-
        set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
-       if (refcount_read(&nf->nf_ref) > 2 || !nf->nf_file) {
+       if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0) {
+               nfsd_file_flush(nf);
                nfsd_file_put_noref(nf);
-               return;
+       } else {
+               nfsd_file_put_noref(nf);
+               if (nf->nf_file)
+                       nfsd_file_schedule_laundrette();
        }
-
-       filemap_flush(nf->nf_file->f_mapping);
-       is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
-       nfsd_file_put_noref(nf);
-       if (is_hashed)
-               nfsd_file_schedule_laundrette();
        if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
                nfsd_file_gc();
 }
@@ -328,6 +331,7 @@ nfsd_file_dispose_list(struct list_head *dispose)
        while(!list_empty(dispose)) {
                nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
                list_del(&nf->nf_lru);
+               nfsd_file_flush(nf);
                nfsd_file_put_noref(nf);
        }
 }
@@ -341,6 +345,7 @@ nfsd_file_dispose_list_sync(struct list_head *dispose)
        while(!list_empty(dispose)) {
                nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
                list_del(&nf->nf_lru);
+               nfsd_file_flush(nf);
                if (!refcount_dec_and_test(&nf->nf_ref))
                        continue;
                if (nfsd_file_free(nf))
index 367551bddfc63e512d51d7b0012d6aa89cf2c570..b5760801d3775a723bb021f2a1145b615edadfea 100644 (file)
@@ -249,34 +249,34 @@ nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
        int w;
 
        if (!svcxdr_encode_stat(xdr, resp->status))
-               return 0;
+               return false;
 
        if (dentry == NULL || d_really_is_negative(dentry))
-               return 1;
+               return true;
        inode = d_inode(dentry);
 
        if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat))
-               return 0;
+               return false;
        if (xdr_stream_encode_u32(xdr, resp->mask) < 0)
-               return 0;
+               return false;
 
        rqstp->rq_res.page_len = w = nfsacl_size(
                (resp->mask & NFS_ACL)   ? resp->acl_access  : NULL,
                (resp->mask & NFS_DFACL) ? resp->acl_default : NULL);
        while (w > 0) {
                if (!*(rqstp->rq_next_page++))
-                       return 1;
+                       return true;
                w -= PAGE_SIZE;
        }
 
        if (!nfs_stream_encode_acl(xdr, inode, resp->acl_access,
                                   resp->mask & NFS_ACL, 0))
-               return 0;
+               return false;
        if (!nfs_stream_encode_acl(xdr, inode, resp->acl_default,
                                   resp->mask & NFS_DFACL, NFS_ACL_DEFAULT))
-               return 0;
+               return false;
 
-       return 1;
+       return true;
 }
 
 /* ACCESS */
@@ -286,17 +286,17 @@ nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
        struct nfsd3_accessres *resp = rqstp->rq_resp;
 
        if (!svcxdr_encode_stat(xdr, resp->status))
-               return 0;
+               return false;
        switch (resp->status) {
        case nfs_ok:
                if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat))
-                       return 0;
+                       return false;
                if (xdr_stream_encode_u32(xdr, resp->access) < 0)
-                       return 0;
+                       return false;
                break;
        }
 
-       return 1;
+       return true;
 }
 
 /*
index 66bdaa2cf496adefcc4a9940a36da544491dada0..ca611ac09f7c18314dfd802d359ebffa73748412 100644 (file)
 #include "page.h"
 #include "btnode.h"
 
+
+/**
+ * nilfs_init_btnc_inode - initialize B-tree node cache inode
+ * @btnc_inode: inode to be initialized
+ *
+ * nilfs_init_btnc_inode() sets up an inode for B-tree node cache.
+ */
+void nilfs_init_btnc_inode(struct inode *btnc_inode)
+{
+       struct nilfs_inode_info *ii = NILFS_I(btnc_inode);
+
+       btnc_inode->i_mode = S_IFREG;
+       ii->i_flags = 0;
+       memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
+       mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
+}
+
 void nilfs_btnode_cache_clear(struct address_space *btnc)
 {
        invalidate_mapping_pages(btnc, 0, -1);
@@ -29,7 +46,7 @@ void nilfs_btnode_cache_clear(struct address_space *btnc)
 struct buffer_head *
 nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
 {
-       struct inode *inode = NILFS_BTNC_I(btnc);
+       struct inode *inode = btnc->host;
        struct buffer_head *bh;
 
        bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
@@ -57,7 +74,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
                              struct buffer_head **pbh, sector_t *submit_ptr)
 {
        struct buffer_head *bh;
-       struct inode *inode = NILFS_BTNC_I(btnc);
+       struct inode *inode = btnc->host;
        struct page *page;
        int err;
 
@@ -157,7 +174,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
                                    struct nilfs_btnode_chkey_ctxt *ctxt)
 {
        struct buffer_head *obh, *nbh;
-       struct inode *inode = NILFS_BTNC_I(btnc);
+       struct inode *inode = btnc->host;
        __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
        int err;
 
index 11663650add73cb4db1057596fa4d2e30e1ed297..bd5544e63a01dc3876227f210583d4ae94e8df90 100644 (file)
@@ -30,6 +30,7 @@ struct nilfs_btnode_chkey_ctxt {
        struct buffer_head *newbh;
 };
 
+void nilfs_init_btnc_inode(struct inode *btnc_inode);
 void nilfs_btnode_cache_clear(struct address_space *);
 struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
                                              __u64 blocknr);
index 3594eabe14194e0b45c98c98b24855ae9fc1334e..f544c22fff78b27262e49ac9054097b46caaccaf 100644 (file)
@@ -58,7 +58,8 @@ static void nilfs_btree_free_path(struct nilfs_btree_path *path)
 static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
                                     __u64 ptr, struct buffer_head **bhp)
 {
-       struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
+       struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
+       struct address_space *btnc = btnc_inode->i_mapping;
        struct buffer_head *bh;
 
        bh = nilfs_btnode_create_block(btnc, ptr);
@@ -470,7 +471,8 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
                                   struct buffer_head **bhp,
                                   const struct nilfs_btree_readahead_info *ra)
 {
-       struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
+       struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
+       struct address_space *btnc = btnc_inode->i_mapping;
        struct buffer_head *bh, *ra_bh;
        sector_t submit_ptr = 0;
        int ret;
@@ -1741,6 +1743,10 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key,
                dat = nilfs_bmap_get_dat(btree);
        }
 
+       ret = nilfs_attach_btree_node_cache(&NILFS_BMAP_I(btree)->vfs_inode);
+       if (ret < 0)
+               return ret;
+
        ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat);
        if (ret < 0)
                return ret;
@@ -1913,7 +1919,7 @@ static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree,
                path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr;
                path[level].bp_ctxt.bh = path[level].bp_bh;
                ret = nilfs_btnode_prepare_change_key(
-                       &NILFS_BMAP_I(btree)->i_btnode_cache,
+                       NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                        &path[level].bp_ctxt);
                if (ret < 0) {
                        nilfs_dat_abort_update(dat,
@@ -1939,7 +1945,7 @@ static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree,
 
        if (buffer_nilfs_node(path[level].bp_bh)) {
                nilfs_btnode_commit_change_key(
-                       &NILFS_BMAP_I(btree)->i_btnode_cache,
+                       NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                        &path[level].bp_ctxt);
                path[level].bp_bh = path[level].bp_ctxt.bh;
        }
@@ -1958,7 +1964,7 @@ static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree,
                               &path[level].bp_newreq.bpr_req);
        if (buffer_nilfs_node(path[level].bp_bh))
                nilfs_btnode_abort_change_key(
-                       &NILFS_BMAP_I(btree)->i_btnode_cache,
+                       NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                        &path[level].bp_ctxt);
 }
 
@@ -2134,7 +2140,8 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree,
 static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
                                             struct list_head *listp)
 {
-       struct address_space *btcache = &NILFS_BMAP_I(btree)->i_btnode_cache;
+       struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
+       struct address_space *btcache = btnc_inode->i_mapping;
        struct list_head lists[NILFS_BTREE_LEVEL_MAX];
        struct pagevec pvec;
        struct buffer_head *bh, *head;
@@ -2188,12 +2195,12 @@ static int nilfs_btree_assign_p(struct nilfs_bmap *btree,
                path[level].bp_ctxt.newkey = blocknr;
                path[level].bp_ctxt.bh = *bh;
                ret = nilfs_btnode_prepare_change_key(
-                       &NILFS_BMAP_I(btree)->i_btnode_cache,
+                       NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                        &path[level].bp_ctxt);
                if (ret < 0)
                        return ret;
                nilfs_btnode_commit_change_key(
-                       &NILFS_BMAP_I(btree)->i_btnode_cache,
+                       NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                        &path[level].bp_ctxt);
                *bh = path[level].bp_ctxt.bh;
        }
@@ -2398,6 +2405,10 @@ int nilfs_btree_init(struct nilfs_bmap *bmap)
 
        if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), bmap->b_inode))
                ret = -EIO;
+       else
+               ret = nilfs_attach_btree_node_cache(
+                       &NILFS_BMAP_I(bmap)->vfs_inode);
+
        return ret;
 }
 
index dc51d3b7a7bff1a66e597bfbb4e31c8a30172741..3b55e239705f4091af860201ff12d82430935826 100644 (file)
@@ -497,7 +497,9 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size,
        di = NILFS_DAT_I(dat);
        lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
        nilfs_palloc_setup_cache(dat, &di->palloc_cache);
-       nilfs_mdt_setup_shadow_map(dat, &di->shadow);
+       err = nilfs_mdt_setup_shadow_map(dat, &di->shadow);
+       if (err)
+               goto failed;
 
        err = nilfs_read_inode_common(dat, raw_inode);
        if (err)
index a8f5315f01e3a8ab2274844c050949ea64b091d4..04fdd420eae72f58d4afe943ca34ec7fa116b2ef 100644 (file)
@@ -126,9 +126,10 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
 int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
                                   __u64 vbn, struct buffer_head **out_bh)
 {
+       struct inode *btnc_inode = NILFS_I(inode)->i_assoc_inode;
        int ret;
 
-       ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
+       ret = nilfs_btnode_submit_block(btnc_inode->i_mapping,
                                        vbn ? : pbn, pbn, REQ_OP_READ, 0,
                                        out_bh, &pbn);
        if (ret == -EEXIST) /* internal code (cache hit) */
@@ -170,7 +171,7 @@ int nilfs_init_gcinode(struct inode *inode)
        ii->i_flags = 0;
        nilfs_bmap_init_gc(ii->i_bmap);
 
-       return 0;
+       return nilfs_attach_btree_node_cache(inode);
 }
 
 /**
@@ -185,7 +186,7 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
                ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
                list_del_init(&ii->i_dirty);
                truncate_inode_pages(&ii->vfs_inode.i_data, 0);
-               nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+               nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
                iput(&ii->vfs_inode);
        }
 }
index 476a4a649f38a370877930bf171c04257ef3dd82..6045cea21f52248a1c4913cd3eb5c2d40b008a56 100644 (file)
  * @cno: checkpoint number
  * @root: pointer to NILFS root object (mounted checkpoint)
  * @for_gc: inode for GC flag
+ * @for_btnc: inode for B-tree node cache flag
+ * @for_shadow: inode for shadowed page cache flag
  */
 struct nilfs_iget_args {
        u64 ino;
        __u64 cno;
        struct nilfs_root *root;
-       int for_gc;
+       bool for_gc;
+       bool for_btnc;
+       bool for_shadow;
 };
 
 static int nilfs_iget_test(struct inode *inode, void *opaque);
@@ -312,7 +316,8 @@ static int nilfs_insert_inode_locked(struct inode *inode,
                                     unsigned long ino)
 {
        struct nilfs_iget_args args = {
-               .ino = ino, .root = root, .cno = 0, .for_gc = 0
+               .ino = ino, .root = root, .cno = 0, .for_gc = false,
+               .for_btnc = false, .for_shadow = false
        };
 
        return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
@@ -525,6 +530,19 @@ static int nilfs_iget_test(struct inode *inode, void *opaque)
                return 0;
 
        ii = NILFS_I(inode);
+       if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
+               if (!args->for_btnc)
+                       return 0;
+       } else if (args->for_btnc) {
+               return 0;
+       }
+       if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
+               if (!args->for_shadow)
+                       return 0;
+       } else if (args->for_shadow) {
+               return 0;
+       }
+
        if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
                return !args->for_gc;
 
@@ -536,15 +554,17 @@ static int nilfs_iget_set(struct inode *inode, void *opaque)
        struct nilfs_iget_args *args = opaque;
 
        inode->i_ino = args->ino;
-       if (args->for_gc) {
+       NILFS_I(inode)->i_cno = args->cno;
+       NILFS_I(inode)->i_root = args->root;
+       if (args->root && args->ino == NILFS_ROOT_INO)
+               nilfs_get_root(args->root);
+
+       if (args->for_gc)
                NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
-               NILFS_I(inode)->i_cno = args->cno;
-               NILFS_I(inode)->i_root = NULL;
-       } else {
-               if (args->root && args->ino == NILFS_ROOT_INO)
-                       nilfs_get_root(args->root);
-               NILFS_I(inode)->i_root = args->root;
-       }
+       if (args->for_btnc)
+               NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
+       if (args->for_shadow)
+               NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
        return 0;
 }
 
@@ -552,7 +572,8 @@ struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
                            unsigned long ino)
 {
        struct nilfs_iget_args args = {
-               .ino = ino, .root = root, .cno = 0, .for_gc = 0
+               .ino = ino, .root = root, .cno = 0, .for_gc = false,
+               .for_btnc = false, .for_shadow = false
        };
 
        return ilookup5(sb, ino, nilfs_iget_test, &args);
@@ -562,7 +583,8 @@ struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
                                unsigned long ino)
 {
        struct nilfs_iget_args args = {
-               .ino = ino, .root = root, .cno = 0, .for_gc = 0
+               .ino = ino, .root = root, .cno = 0, .for_gc = false,
+               .for_btnc = false, .for_shadow = false
        };
 
        return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
@@ -593,7 +615,8 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
                                __u64 cno)
 {
        struct nilfs_iget_args args = {
-               .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
+               .ino = ino, .root = NULL, .cno = cno, .for_gc = true,
+               .for_btnc = false, .for_shadow = false
        };
        struct inode *inode;
        int err;
@@ -613,6 +636,113 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
        return inode;
 }
 
+/**
+ * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
+ * @inode: inode object
+ *
+ * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
+ * or does nothing if the inode already has it.  This function allocates
+ * an additional inode to maintain page cache of B-tree nodes one-on-one.
+ *
+ * Return Value: On success, 0 is returned. On errors, one of the following
+ * negative error codes is returned.
+ *
+ * %-ENOMEM - Insufficient memory available.
+ */
+int nilfs_attach_btree_node_cache(struct inode *inode)
+{
+       struct nilfs_inode_info *ii = NILFS_I(inode);
+       struct inode *btnc_inode;
+       struct nilfs_iget_args args;
+
+       if (ii->i_assoc_inode)
+               return 0;
+
+       args.ino = inode->i_ino;
+       args.root = ii->i_root;
+       args.cno = ii->i_cno;
+       args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
+       args.for_btnc = true;
+       args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;
+
+       btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
+                                 nilfs_iget_set, &args);
+       if (unlikely(!btnc_inode))
+               return -ENOMEM;
+       if (btnc_inode->i_state & I_NEW) {
+               nilfs_init_btnc_inode(btnc_inode);
+               unlock_new_inode(btnc_inode);
+       }
+       NILFS_I(btnc_inode)->i_assoc_inode = inode;
+       NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
+       ii->i_assoc_inode = btnc_inode;
+
+       return 0;
+}
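
nilfs_attach_btree_node_cache() above lazily pairs an inode with a second, hidden inode whose page cache holds the B-tree node pages, replacing the old embedded i_btnode_cache address_space. A userspace sketch of the same lazy one-to-one pairing, with illustrative names rather than the nilfs API:

    #include <errno.h>
    #include <stdlib.h>

    struct obj {
            struct obj *assoc;      /* i_assoc_inode analogue */
            void *bmap;             /* state the companion shares */
    };

    /* Attach a companion object once; later calls are no-ops,
     * mirroring the early return on ii->i_assoc_inode. */
    static int attach_companion(struct obj *o)
    {
            struct obj *c;

            if (o->assoc)
                    return 0;

            c = calloc(1, sizeof(*c));
            if (!c)
                    return -ENOMEM;

            c->assoc = o;           /* back pointer from companion */
            c->bmap  = o->bmap;     /* companion reuses the owner's bmap */
            o->assoc = c;
            return 0;
    }

    int main(void)
    {
            struct obj o = { 0 };

            return attach_companion(&o) ? 1 : 0;
    }
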
+
+/**
+ * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
+ * @inode: inode object
+ *
+ * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
+ * holder inode bound to @inode, or does nothing if @inode doesn't have it.
+ */
+void nilfs_detach_btree_node_cache(struct inode *inode)
+{
+       struct nilfs_inode_info *ii = NILFS_I(inode);
+       struct inode *btnc_inode = ii->i_assoc_inode;
+
+       if (btnc_inode) {
+               NILFS_I(btnc_inode)->i_assoc_inode = NULL;
+               ii->i_assoc_inode = NULL;
+               iput(btnc_inode);
+       }
+}
+
+/**
+ * nilfs_iget_for_shadow - obtain inode for shadow mapping
+ * @inode: inode object that uses shadow mapping
+ *
+ * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
+ * caches for shadow mapping.  The page cache for data pages is set up
+ * in one inode and the one for b-tree node pages is set up in the
+ * other inode, which is attached to the former inode.
+ *
+ * Return Value: On success, a pointer to the inode for data pages is
+ * returned. On errors, one of the following negative error codes is returned
+ * in a pointer type.
+ *
+ * %-ENOMEM - Insufficient memory available.
+ */
+struct inode *nilfs_iget_for_shadow(struct inode *inode)
+{
+       struct nilfs_iget_args args = {
+               .ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
+               .for_btnc = false, .for_shadow = true
+       };
+       struct inode *s_inode;
+       int err;
+
+       s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
+                              nilfs_iget_set, &args);
+       if (unlikely(!s_inode))
+               return ERR_PTR(-ENOMEM);
+       if (!(s_inode->i_state & I_NEW))
+               return inode;
+
+       NILFS_I(s_inode)->i_flags = 0;
+       memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
+       mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
+
+       err = nilfs_attach_btree_node_cache(s_inode);
+       if (unlikely(err)) {
+               iget_failed(s_inode);
+               return ERR_PTR(err);
+       }
+       unlock_new_inode(s_inode);
+       return s_inode;
+}
+
 void nilfs_write_inode_common(struct inode *inode,
                              struct nilfs_inode *raw_inode, int has_bmap)
 {
@@ -760,7 +890,8 @@ static void nilfs_clear_inode(struct inode *inode)
        if (test_bit(NILFS_I_BMAP, &ii->i_state))
                nilfs_bmap_clear(ii->i_bmap);
 
-       nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+       if (!test_bit(NILFS_I_BTNC, &ii->i_state))
+               nilfs_detach_btree_node_cache(inode);
 
        if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
                nilfs_put_root(ii->i_root);
index 78db33decd72fb04db41f4f515c6b87a54d1158a..d29a0f2b9c16ecd78e5d85626acb361a0ee72baf 100644 (file)
@@ -471,9 +471,18 @@ int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
 void nilfs_mdt_clear(struct inode *inode)
 {
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
+       struct nilfs_shadow_map *shadow = mdi->mi_shadow;
 
        if (mdi->mi_palloc_cache)
                nilfs_palloc_destroy_cache(inode);
+
+       if (shadow) {
+               struct inode *s_inode = shadow->inode;
+
+               shadow->inode = NULL;
+               iput(s_inode);
+               mdi->mi_shadow = NULL;
+       }
 }
 
 /**
@@ -507,12 +516,15 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
                               struct nilfs_shadow_map *shadow)
 {
        struct nilfs_mdt_info *mi = NILFS_MDT(inode);
+       struct inode *s_inode;
 
        INIT_LIST_HEAD(&shadow->frozen_buffers);
-       address_space_init_once(&shadow->frozen_data);
-       nilfs_mapping_init(&shadow->frozen_data, inode);
-       address_space_init_once(&shadow->frozen_btnodes);
-       nilfs_mapping_init(&shadow->frozen_btnodes, inode);
+
+       s_inode = nilfs_iget_for_shadow(inode);
+       if (IS_ERR(s_inode))
+               return PTR_ERR(s_inode);
+
+       shadow->inode = s_inode;
        mi->mi_shadow = shadow;
        return 0;
 }
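
With this change the shadow map is backed by a real inode pair instead of two bare address_space structs, but the flow it serves is unchanged: nilfs_mdt_save_to_shadow_map() snapshots dirty pages into the shadow cache and nilfs_mdt_restore_from_shadow_map() copies them back on failure. A toy model of that snapshot/rollback idea (illustrative only, not the nilfs code):

    #include <string.h>

    struct mdt {
            char cache[64];         /* stands in for inode->i_mapping */
            char shadow[64];        /* stands in for shadow->inode's cache */
    };

    static void save_to_shadow(struct mdt *m)
    {
            memcpy(m->shadow, m->cache, sizeof(m->cache));
    }

    static void restore_from_shadow(struct mdt *m)
    {
            memcpy(m->cache, m->shadow, sizeof(m->shadow));
    }

    int main(void)
    {
            struct mdt m;

            memset(&m, 0, sizeof(m));
            save_to_shadow(&m);      /* snapshot before a risky update */
            restore_from_shadow(&m); /* roll back if the update fails */
            return 0;
    }
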
@@ -526,14 +538,15 @@ int nilfs_mdt_save_to_shadow_map(struct inode *inode)
        struct nilfs_mdt_info *mi = NILFS_MDT(inode);
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct nilfs_shadow_map *shadow = mi->mi_shadow;
+       struct inode *s_inode = shadow->inode;
        int ret;
 
-       ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);
+       ret = nilfs_copy_dirty_pages(s_inode->i_mapping, inode->i_mapping);
        if (ret)
                goto out;
 
-       ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes,
-                                    &ii->i_btnode_cache);
+       ret = nilfs_copy_dirty_pages(NILFS_I(s_inode)->i_assoc_inode->i_mapping,
+                                    ii->i_assoc_inode->i_mapping);
        if (ret)
                goto out;
 
@@ -549,7 +562,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
        struct page *page;
        int blkbits = inode->i_blkbits;
 
-       page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
+       page = grab_cache_page(shadow->inode->i_mapping, bh->b_page->index);
        if (!page)
                return -ENOMEM;
 
@@ -581,7 +594,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
        struct page *page;
        int n;
 
-       page = find_lock_page(&shadow->frozen_data, bh->b_page->index);
+       page = find_lock_page(shadow->inode->i_mapping, bh->b_page->index);
        if (page) {
                if (page_has_buffers(page)) {
                        n = bh_offset(bh) >> inode->i_blkbits;
@@ -622,10 +635,11 @@ void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
                nilfs_palloc_clear_cache(inode);
 
        nilfs_clear_dirty_pages(inode->i_mapping, true);
-       nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);
+       nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping);
 
-       nilfs_clear_dirty_pages(&ii->i_btnode_cache, true);
-       nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes);
+       nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping, true);
+       nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping,
+                             NILFS_I(shadow->inode)->i_assoc_inode->i_mapping);
 
        nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);
 
@@ -640,10 +654,11 @@ void nilfs_mdt_clear_shadow_map(struct inode *inode)
 {
        struct nilfs_mdt_info *mi = NILFS_MDT(inode);
        struct nilfs_shadow_map *shadow = mi->mi_shadow;
+       struct inode *shadow_btnc_inode = NILFS_I(shadow->inode)->i_assoc_inode;
 
        down_write(&mi->mi_sem);
        nilfs_release_frozen_buffers(shadow);
-       truncate_inode_pages(&shadow->frozen_data, 0);
-       truncate_inode_pages(&shadow->frozen_btnodes, 0);
+       truncate_inode_pages(shadow->inode->i_mapping, 0);
+       truncate_inode_pages(shadow_btnc_inode->i_mapping, 0);
        up_write(&mi->mi_sem);
 }
index 8f86080a436de6cebedafc4bd1d49629539298c6..9e23bab3ff127b9cb20f4d0779571534526fe9e8 100644 (file)
 /**
  * struct nilfs_shadow_map - shadow mapping of meta data file
  * @bmap_store: shadow copy of bmap state
- * @frozen_data: shadowed dirty data pages
- * @frozen_btnodes: shadowed dirty b-tree nodes' pages
+ * @inode: holder of page caches used in shadow mapping
  * @frozen_buffers: list of frozen buffers
  */
 struct nilfs_shadow_map {
        struct nilfs_bmap_store bmap_store;
-       struct address_space frozen_data;
-       struct address_space frozen_btnodes;
+       struct inode *inode;
        struct list_head frozen_buffers;
 };
 
index a7b81755c3501a28cd3dbe849f07b9f92b3fd8b6..1344f7d475d3c664dc18746ce489a3ae0fcae989 100644 (file)
@@ -28,7 +28,7 @@
  * @i_xattr: <TODO>
  * @i_dir_start_lookup: page index of last successful search
  * @i_cno: checkpoint number for GC inode
- * @i_btnode_cache: cached pages of b-tree nodes
+ * @i_assoc_inode: associated inode (B-tree node cache holder or back pointer)
  * @i_dirty: list for connecting dirty files
  * @xattr_sem: semaphore for extended attributes processing
  * @i_bh: buffer contains disk inode
@@ -43,7 +43,7 @@ struct nilfs_inode_info {
        __u64 i_xattr;  /* sector_t ??? */
        __u32 i_dir_start_lookup;
        __u64 i_cno;            /* check point number for GC inode */
-       struct address_space i_btnode_cache;
+       struct inode *i_assoc_inode;
        struct list_head i_dirty;       /* List for connecting dirty files */
 
 #ifdef CONFIG_NILFS_XATTR
@@ -75,13 +75,6 @@ NILFS_BMAP_I(const struct nilfs_bmap *bmap)
        return container_of(bmap, struct nilfs_inode_info, i_bmap_data);
 }
 
-static inline struct inode *NILFS_BTNC_I(struct address_space *btnc)
-{
-       struct nilfs_inode_info *ii =
-               container_of(btnc, struct nilfs_inode_info, i_btnode_cache);
-       return &ii->vfs_inode;
-}
-
 /*
  * Dynamic state flags of NILFS on-memory inode (i_state)
  */
@@ -98,6 +91,8 @@ enum {
        NILFS_I_INODE_SYNC,             /* dsync is not allowed for inode */
        NILFS_I_BMAP,                   /* has bmap and btnode_cache */
        NILFS_I_GCINODE,                /* inode for GC, on memory only */
+       NILFS_I_BTNC,                   /* inode for btree node cache */
+       NILFS_I_SHADOW,                 /* inode for shadowed page cache */
 };
 
 /*
@@ -267,6 +262,9 @@ struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
                         unsigned long ino);
 extern struct inode *nilfs_iget_for_gc(struct super_block *sb,
                                       unsigned long ino, __u64 cno);
+int nilfs_attach_btree_node_cache(struct inode *inode);
+void nilfs_detach_btree_node_cache(struct inode *inode);
+struct inode *nilfs_iget_for_shadow(struct inode *inode);
 extern void nilfs_update_inode(struct inode *, struct buffer_head *, int);
 extern void nilfs_truncate(struct inode *);
 extern void nilfs_evict_inode(struct inode *);
index 063dd16d75b590d392e3e449c94f88f670d54799..a8e88cc38e16a88a9346c36a5babca39c1517759 100644 (file)
@@ -436,22 +436,12 @@ unsigned int nilfs_page_count_clean_buffers(struct page *page,
        return nc;
 }
 
-void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
-{
-       mapping->host = inode;
-       mapping->flags = 0;
-       mapping_set_gfp_mask(mapping, GFP_NOFS);
-       mapping->private_data = NULL;
-       mapping->a_ops = &empty_aops;
-}
-
 /*
  * NILFS2 needs clear_page_dirty() in the following two cases:
  *
- * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
- *    page dirty flags when it copies back pages from the shadow cache
- *    (gcdat->{i_mapping,i_btnode_cache}) to its original cache
- *    (dat->{i_mapping,i_btnode_cache}).
+ * 1) For B-tree node pages and data pages of DAT file, NILFS2 clears dirty
+ *    flag of pages when it copies back pages from shadow cache to the
+ *    original cache.
  *
  * 2) Some B-tree operations like insertion or deletion may dispose buffers
  *    in dirty state, and this needs to cancel the dirty state of their pages.
index 569263b23c0c49c26299f066354d7907c32df8b5..21ddcdd4d63e553fa5da1e8fa93d805e5cea57c8 100644 (file)
@@ -43,7 +43,6 @@ int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
 void nilfs_copy_back_pages(struct address_space *, struct address_space *);
 void nilfs_clear_dirty_page(struct page *, bool);
 void nilfs_clear_dirty_pages(struct address_space *, bool);
-void nilfs_mapping_init(struct address_space *mapping, struct inode *inode);
 unsigned int nilfs_page_count_clean_buffers(struct page *, unsigned int,
                                            unsigned int);
 unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
index 85a8533347718543d366de4ea35126e0ab3ef54a..0afe0832c7547174e17b4efa6b39b51d0c131ee6 100644 (file)
@@ -733,15 +733,18 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
                                            struct list_head *listp)
 {
        struct nilfs_inode_info *ii = NILFS_I(inode);
-       struct address_space *mapping = &ii->i_btnode_cache;
+       struct inode *btnc_inode = ii->i_assoc_inode;
        struct pagevec pvec;
        struct buffer_head *bh, *head;
        unsigned int i;
        pgoff_t index = 0;
 
+       if (!btnc_inode)
+               return;
+
        pagevec_init(&pvec);
 
-       while (pagevec_lookup_tag(&pvec, mapping, &index,
+       while (pagevec_lookup_tag(&pvec, btnc_inode->i_mapping, &index,
                                        PAGECACHE_TAG_DIRTY)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        bh = head = page_buffers(pvec.pages[i]);
@@ -2410,7 +2413,7 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
                        continue;
                list_del_init(&ii->i_dirty);
                truncate_inode_pages(&ii->vfs_inode.i_data, 0);
-               nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+               nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
                iput(&ii->vfs_inode);
        }
 }
index 3e05c98631ec6bbf87bdb1605e94ee6466128e52..ba108f915391e35c2b900c864f4f436ba91d54f3 100644 (file)
@@ -157,7 +157,8 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
        ii->i_bh = NULL;
        ii->i_state = 0;
        ii->i_cno = 0;
-       nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode);
+       ii->i_assoc_inode = NULL;
+       ii->i_bmap = &ii->i_bmap_data;
        return &ii->vfs_inode;
 }
 
@@ -1377,8 +1378,6 @@ static void nilfs_inode_init_once(void *obj)
 #ifdef CONFIG_NILFS_XATTR
        init_rwsem(&ii->xattr_sem);
 #endif
-       address_space_init_once(&ii->i_btnode_cache);
-       ii->i_bmap = &ii->i_bmap_data;
        inode_init_once(&ii->vfs_inode);
 }
 
index d154dcfe06afdd5f4dcd4c449ab132722ba252ec..90e3dad8ee454a65cdfe1002b69775a497a95649 100644 (file)
@@ -1746,7 +1746,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
                set_buffer_dirty(bh);
        } while ((bh = bh->b_this_page) != head);
        spin_unlock(&mapping->private_lock);
-       block_dirty_folio(mapping, page_folio(page));
+       filemap_dirty_folio(mapping, page_folio(page));
        if (unlikely(buffers_to_free)) {
                do {
                        bh = buffers_to_free->b_this_page;
index 273f65e0aabac127be3d4175ee6b205b94a0aeef..0b6f551a342a176e8f3ad79aa1f11061a4946655 100644 (file)
@@ -337,7 +337,6 @@ void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
 /* Read information header from global quota file */
 int ocfs2_global_read_info(struct super_block *sb, int type)
 {
-       struct inode *gqinode = NULL;
        unsigned int ino[OCFS2_MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
                                              GROUP_QUOTA_SYSTEM_INODE };
        struct ocfs2_global_disk_dqinfo dinfo;
@@ -346,29 +345,31 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
        u64 pcount;
        int status;
 
+       oinfo->dqi_gi.dqi_sb = sb;
+       oinfo->dqi_gi.dqi_type = type;
+       ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
+       oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
+       oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
+       oinfo->dqi_gqi_bh = NULL;
+       oinfo->dqi_gqi_count = 0;
+
        /* Read global header */
-       gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
+       oinfo->dqi_gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
                        OCFS2_INVALID_SLOT);
-       if (!gqinode) {
+       if (!oinfo->dqi_gqinode) {
                mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
                        type);
                status = -EINVAL;
                goto out_err;
        }
-       oinfo->dqi_gi.dqi_sb = sb;
-       oinfo->dqi_gi.dqi_type = type;
-       oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
-       oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
-       oinfo->dqi_gqi_bh = NULL;
-       oinfo->dqi_gqi_count = 0;
-       oinfo->dqi_gqinode = gqinode;
+
        status = ocfs2_lock_global_qf(oinfo, 0);
        if (status < 0) {
                mlog_errno(status);
                goto out_err;
        }
 
-       status = ocfs2_extent_map_get_blocks(gqinode, 0, &oinfo->dqi_giblk,
+       status = ocfs2_extent_map_get_blocks(oinfo->dqi_gqinode, 0, &oinfo->dqi_giblk,
                                             &pcount, NULL);
        if (status < 0)
                goto out_unlock;
index 0e4b16d4c037fae3f3eda96edcddf53800b55b21..b1a8b046f4c22a990af8c8bb26ecb709394dddf0 100644 (file)
@@ -702,8 +702,6 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
        info->dqi_priv = oinfo;
        oinfo->dqi_type = type;
        INIT_LIST_HEAD(&oinfo->dqi_chunk);
-       oinfo->dqi_gqinode = NULL;
-       ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
        oinfo->dqi_rec = NULL;
        oinfo->dqi_lqi_bh = NULL;
        oinfo->dqi_libh = NULL;
index 9648ac15164a2194fa6e28812777aac0ffc550fe..e140ea150bbb14e0ab0c83808e0b155b158bfc6a 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -804,7 +804,7 @@ struct pipe_inode_info *alloc_pipe_info(void)
        if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
                goto out_revert_acct;
 
-       pipe->bufs = kvcalloc(pipe_bufs, sizeof(struct pipe_buffer),
+       pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
                             GFP_KERNEL_ACCOUNT);
 
        if (pipe->bufs) {
@@ -849,7 +849,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
 #endif
        if (pipe->tmp_page)
                __free_page(pipe->tmp_page);
-       kvfree(pipe->bufs);
+       kfree(pipe->bufs);
        kfree(pipe);
 }
 
@@ -1264,7 +1264,8 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
        if (nr_slots < n)
                return -EBUSY;
 
-       bufs = kvcalloc(nr_slots, sizeof(*bufs), GFP_KERNEL_ACCOUNT);
+       bufs = kcalloc(nr_slots, sizeof(*bufs),
+                      GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
        if (unlikely(!bufs))
                return -ENOMEM;
 
@@ -1291,7 +1292,7 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
        head = n;
        tail = 0;
 
-       kvfree(pipe->bufs);
+       kfree(pipe->bufs);
        pipe->bufs = bufs;
        pipe->ring_size = nr_slots;
        if (pipe->max_usage > nr_slots)
index 80acb6885cf90b6fce2aa5b64d7b6c3b10d6687a..962d32468eb487afd42a14da7afd7589aaeaac98 100644 (file)
@@ -759,9 +759,14 @@ static void posix_acl_fix_xattr_userns(
 }
 
 void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
+                                  struct inode *inode,
                                   void *value, size_t size)
 {
        struct user_namespace *user_ns = current_user_ns();
+
+       /* Leave ids untouched on non-idmapped mounts. */
+       if (no_idmapping(mnt_userns, i_user_ns(inode)))
+               mnt_userns = &init_user_ns;
        if ((user_ns == &init_user_ns) && (mnt_userns == &init_user_ns))
                return;
        posix_acl_fix_xattr_userns(&init_user_ns, user_ns, mnt_userns, value,
@@ -769,9 +774,14 @@ void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
 }
 
 void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
+                                struct inode *inode,
                                 void *value, size_t size)
 {
        struct user_namespace *user_ns = current_user_ns();
+
+       /* Leave ids untouched on non-idmapped mounts. */
+       if (no_idmapping(mnt_userns, i_user_ns(inode)))
+               mnt_userns = &init_user_ns;
        if ((user_ns == &init_user_ns) && (mnt_userns == &init_user_ns))
                return;
        posix_acl_fix_xattr_userns(user_ns, &init_user_ns, mnt_userns, value,
index 6d8d4bf2083773d097ec1fbbf1b1abe34721a731..2e244ada1f970b40dfc11417e5fd43579074d42f 100644 (file)
@@ -32,6 +32,8 @@ static int __init copy_xbc_key_value_list(char *dst, size_t size)
        int ret = 0;
 
        key = kzalloc(XBC_KEYLEN_MAX, GFP_KERNEL);
+       if (!key)
+               return -ENOMEM;
 
        xbc_for_each_key_value(leaf, val) {
                ret = xbc_node_compose_key(leaf, key, XBC_KEYLEN_MAX);
index dc5000173b80a2e36d576df7afb49706e44c0a92..e643aec2b0efe929bce63d6b4d1ddcf81b0b0399 100644 (file)
@@ -1630,7 +1630,6 @@ int generic_write_checks_count(struct kiocb *iocb, loff_t *count)
        if (!*count)
                return 0;
 
-       /* FIXME: this is for backwards compatibility with 2.4 */
        if (iocb->ki_flags & IOCB_APPEND)
                iocb->ki_pos = i_size_read(inode);
 
index f8e1f4ee87ffca16ee019b4dcd0d29c4fd3fdef3..7ab8a58c29b618b9ead091266dd5b4c55f58c0cb 100644 (file)
@@ -554,9 +554,9 @@ int seq_dentry(struct seq_file *m, struct dentry *dentry, const char *esc)
 }
 EXPORT_SYMBOL(seq_dentry);
 
-static void *single_start(struct seq_file *p, loff_t *pos)
+void *single_start(struct seq_file *p, loff_t *pos)
 {
-       return NULL + (*pos == 0);
+       return *pos ? NULL : SEQ_START_TOKEN;
 }
 
 static void *single_next(struct seq_file *p, void *v, loff_t *pos)
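
The single_start() fix replaces arithmetic on a null pointer (the old NULL + (*pos == 0) is undefined behavior that modern compiler and UBSan builds flag) with the seq_file SEQ_START_TOKEN sentinel. A standalone sketch of the resulting start/next contract, simplified from the seq_file interface:

    #include <stdio.h>

    /* A non-NULL sentinel means "emit the single record". */
    #define START_TOKEN ((void *)1)

    static void *start(long long *pos)
    {
            /* Old code: return NULL + (*pos == 0); -- UB on NULL. */
            return *pos ? NULL : START_TOKEN;
    }

    static void *next(void *v, long long *pos)
    {
            (void)v;
            ++*pos;         /* a single record has no successor */
            return NULL;
    }

    int main(void)
    {
            long long pos = 0;
            void *v;

            for (v = start(&pos); v; v = next(v, &pos))
                    puts("show() called exactly once");
            return 0;
    }
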
index 38b8fc51486049d4de706f9fcdec3ef1d0bdaf6d..0507aecfc6694fe10aa827fc85578562988d90a8 100644 (file)
 
 #define NUMBER_OF_SMB2_COMMANDS        0x0013
 
+/*
+ * Size of the session key (crypto key encrypted with the password)
+ */
+#define SMB2_NTLMV2_SESSKEY_SIZE       16
+#define SMB2_SIGNATURE_SIZE            16
+#define SMB2_HMACSHA256_SIZE           32
+#define SMB2_CMACAES_SIZE              16
+#define SMB3_GCM128_CRYPTKEY_SIZE      16
+#define SMB3_GCM256_CRYPTKEY_SIZE      32
+
+/*
+ * Size of the smb3 encryption/decryption keys
+ * This size is big enough to store any cipher key types.
+ */
+#define SMB3_ENC_DEC_KEY_SIZE          32
+
+/*
+ * Size of the smb3 signing key
+ */
+#define SMB3_SIGN_KEY_SIZE             16
+
+#define CIFS_CLIENT_CHALLENGE_SIZE     8
+
+/* Maximum buffer size value we can send with 1 credit */
+#define SMB2_MAX_BUFFER_SIZE 65536
+
+/*
+ * The default wsize is 1M for SMB2 (and for some CIFS cases).
+ * find_get_pages seems to return a maximum of 256
+ * pages in a single call. With PAGE_SIZE == 4k, this means we can
+ * fill a single wsize request with a single call.
+ */
+#define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024)
+
 /*
  * SMB2 Header Definition
  *
 #define SMB2_FLAGS_DFS_OPERATIONS      cpu_to_le32(0x10000000)
 #define SMB2_FLAGS_REPLAY_OPERATION    cpu_to_le32(0x20000000) /* SMB3 & up */
 
+/*
+ *     Definitions for SMB2 Protocol Data Units (network frames)
+ *
+ *  See MS-SMB2.PDF specification for protocol details.
+ *  The Naming convention is the lower case version of the SMB2
+ *  command code name for the struct. Note that structures must be packed.
+ *
+ */
+
 /* See MS-SMB2 section 2.2.1 */
 struct smb2_hdr {
        __le32 ProtocolId;      /* 0xFE 'S' 'M' 'B' */
@@ -115,6 +158,18 @@ struct smb2_pdu {
        __le16 StructureSize2; /* size of wct area (varies, request specific) */
 } __packed;
 
+#define SMB2_ERROR_STRUCTURE_SIZE2     9
+#define SMB2_ERROR_STRUCTURE_SIZE2_LE  cpu_to_le16(SMB2_ERROR_STRUCTURE_SIZE2)
+
+struct smb2_err_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;
+       __u8   ErrorContextCount;
+       __u8   Reserved;
+       __le32 ByteCount;  /* even if zero, at least one byte follows */
+       __u8   ErrorData[1];  /* variable length */
+} __packed;
+
 #define SMB3_AES_CCM_NONCE 11
 #define SMB3_AES_GCM_NONCE 12
 
@@ -608,8 +663,8 @@ struct smb2_close_req {
        __le16 StructureSize;   /* Must be 24 */
        __le16 Flags;
        __le32 Reserved;
-       __le64  PersistentFileId; /* opaque endianness */
-       __le64  VolatileFileId; /* opaque endianness */
+       __u64  PersistentFileId; /* opaque endianness */
+       __u64  VolatileFileId; /* opaque endianness */
 } __packed;
 
 /*
@@ -653,8 +708,8 @@ struct smb2_read_req {
        __u8   Flags; /* MBZ unless SMB3.02 or later */
        __le32 Length;
        __le64 Offset;
-       __le64  PersistentFileId;
-       __le64  VolatileFileId;
+       __u64  PersistentFileId;
+       __u64  VolatileFileId;
        __le32 MinimumCount;
        __le32 Channel; /* MBZ except for SMB3 or later */
        __le32 RemainingBytes;
@@ -692,8 +747,8 @@ struct smb2_write_req {
        __le16 DataOffset; /* offset from start of SMB2 header to write data */
        __le32 Length;
        __le64 Offset;
-       __le64  PersistentFileId; /* opaque endianness */
-       __le64  VolatileFileId; /* opaque endianness */
+       __u64  PersistentFileId; /* opaque endianness */
+       __u64  VolatileFileId; /* opaque endianness */
        __le32 Channel; /* MBZ unless SMB3.02 or later */
        __le32 RemainingBytes;
        __le16 WriteChannelInfoOffset;
@@ -722,8 +777,8 @@ struct smb2_flush_req {
        __le16 StructureSize;   /* Must be 24 */
        __le16 Reserved1;
        __le32 Reserved2;
-       __le64  PersistentFileId;
-       __le64  VolatileFileId;
+       __u64  PersistentFileId;
+       __u64  VolatileFileId;
 } __packed;
 
 struct smb2_flush_rsp {
@@ -732,6 +787,123 @@ struct smb2_flush_rsp {
        __le16 Reserved;
 } __packed;
 
+#define SMB2_LOCKFLAG_SHARED           0x0001
+#define SMB2_LOCKFLAG_EXCLUSIVE                0x0002
+#define SMB2_LOCKFLAG_UNLOCK           0x0004
+#define SMB2_LOCKFLAG_FAIL_IMMEDIATELY 0x0010
+#define SMB2_LOCKFLAG_MASK             0x0007
+
+struct smb2_lock_element {
+       __le64 Offset;
+       __le64 Length;
+       __le32 Flags;
+       __le32 Reserved;
+} __packed;
+
+struct smb2_lock_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 48 */
+       __le16 LockCount;
+       /*
+        * The least significant four bits are the index, the other 28 bits are
+        * the lock sequence number (0 to 64). See MS-SMB2 2.2.26
+        */
+       __le32 LockSequenceNumber;
+       __u64  PersistentFileId;
+       __u64  VolatileFileId;
+       /* Followed by at least one */
+       struct smb2_lock_element locks[1];
+} __packed;
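
Per the LockSequenceNumber comment in smb2_lock_req above, the 32-bit field packs a 4-bit index in the low bits and a 28-bit sequence number above it. A small illustration of that split (the helper name is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Low four bits: index; remaining 28 bits: lock sequence number. */
    static void split_lock_seq(uint32_t lsn, unsigned *index, uint32_t *seq)
    {
            *index = lsn & 0xf;
            *seq   = lsn >> 4;
    }

    int main(void)
    {
            unsigned index;
            uint32_t seq;

            split_lock_seq(0x2a7, &index, &seq);
            printf("index=%u seq=%u\n", index, (unsigned)seq); /* index=7 seq=42 */
            return 0;
    }
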
+
+struct smb2_lock_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 4 */
+       __le16 Reserved;
+} __packed;
+
+struct smb2_echo_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;   /* Must be 4 */
+       __u16  Reserved;
+} __packed;
+
+struct smb2_echo_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize;   /* Must be 4 */
+       __u16  Reserved;
+} __packed;
+
+/*
+ * Valid FileInformation classes for query directory
+ *
+ * Note that these are a subset of the (file) QUERY_INFO levels defined
+ * later in this file (but since QUERY_DIRECTORY uses equivalent numbers
+ * we do not redefine them here)
+ *
+ * FileDirectoryInformation            0x01
+ * FileFullDirectoryInformation                0x02
+ * FileIdFullDirectoryInformation      0x26
+ * FileBothDirectoryInformation                0x03
+ * FileIdBothDirectoryInformation      0x25
+ * FileNamesInformation                        0x0C
+ * FileIdExtdDirectoryInformation      0x3C
+ */
+
+/* search (query_directory) Flags field */
+#define SMB2_RESTART_SCANS             0x01
+#define SMB2_RETURN_SINGLE_ENTRY       0x02
+#define SMB2_INDEX_SPECIFIED           0x04
+#define SMB2_REOPEN                    0x10
+
+struct smb2_query_directory_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 33 */
+       __u8   FileInformationClass;
+       __u8   Flags;
+       __le32 FileIndex;
+       __u64  PersistentFileId;
+       __u64  VolatileFileId;
+       __le16 FileNameOffset;
+       __le16 FileNameLength;
+       __le32 OutputBufferLength;
+       __u8   Buffer[1];
+} __packed;
+
+struct smb2_query_directory_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 9 */
+       __le16 OutputBufferOffset;
+       __le32 OutputBufferLength;
+       __u8   Buffer[1];
+} __packed;
+
+/*
+ * Maximum number of iovs we need for a set-info request.
+ * The largest one is rename/hardlink
+ * [0] : struct smb2_set_info_req + smb2_file_[rename|link]_info
+ * [1] : path
+ * [2] : compound padding
+ */
+#define SMB2_SET_INFO_IOV_SIZE 3
+
+struct smb2_set_info_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 33 */
+       __u8   InfoType;
+       __u8   FileInfoClass;
+       __le32 BufferLength;
+       __le16 BufferOffset;
+       __u16  Reserved;
+       __le32 AdditionalInformation;
+       __u64  PersistentFileId;
+       __u64  VolatileFileId;
+       __u8   Buffer[1];
+} __packed;
+
+struct smb2_set_info_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 2 */
+} __packed;
 
 /*
  * SMB2_NOTIFY  See MS-SMB2 section 2.2.35
@@ -769,8 +941,8 @@ struct smb2_change_notify_req {
        __le16  StructureSize;
        __le16  Flags;
        __le32  OutputBufferLength;
-       __le64  PersistentFileId; /* opaque endianness */
-       __le64  VolatileFileId; /* opaque endianness */
+       __u64   PersistentFileId; /* opaque endianness */
+       __u64   VolatileFileId; /* opaque endianness */
        __le32  CompletionFilter;
        __u32   Reserved;
 } __packed;
@@ -978,12 +1150,455 @@ struct smb2_create_rsp {
        __le64 EndofFile;
        __le32 FileAttributes;
        __le32 Reserved2;
-       __le64  PersistentFileId;
-       __le64  VolatileFileId;
+       __u64  PersistentFileId;
+       __u64  VolatileFileId;
        __le32 CreateContextsOffset;
        __le32 CreateContextsLength;
        __u8   Buffer[1];
 } __packed;
 
+struct create_posix {
+       struct create_context ccontext;
+       __u8    Name[16];
+       __le32  Mode;
+       __u32   Reserved;
+} __packed;
+
+#define SMB2_LEASE_NONE_LE                     cpu_to_le32(0x00)
+#define SMB2_LEASE_READ_CACHING_LE             cpu_to_le32(0x01)
+#define SMB2_LEASE_HANDLE_CACHING_LE           cpu_to_le32(0x02)
+#define SMB2_LEASE_WRITE_CACHING_LE            cpu_to_le32(0x04)
+
+#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE   cpu_to_le32(0x02)
+
+#define SMB2_LEASE_KEY_SIZE                    16
+
+struct lease_context {
+       __u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
+       __le32 LeaseState;
+       __le32 LeaseFlags;
+       __le64 LeaseDuration;
+} __packed;
+
+struct lease_context_v2 {
+       __u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
+       __le32 LeaseState;
+       __le32 LeaseFlags;
+       __le64 LeaseDuration;
+       __u8 ParentLeaseKey[SMB2_LEASE_KEY_SIZE];
+       __le16 Epoch;
+       __le16 Reserved;
+} __packed;
+
+struct create_lease {
+       struct create_context ccontext;
+       __u8   Name[8];
+       struct lease_context lcontext;
+} __packed;
+
+struct create_lease_v2 {
+       struct create_context ccontext;
+       __u8   Name[8];
+       struct lease_context_v2 lcontext;
+       __u8   Pad[4];
+} __packed;
+
+/* See MS-SMB2 2.2.31 and 2.2.32 */
+struct smb2_ioctl_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 57 */
+       __le16 Reserved; /* offset from start of SMB2 header to write data */
+       __le32 CtlCode;
+       __u64  PersistentFileId;
+       __u64  VolatileFileId;
+       __le32 InputOffset; /* Reserved MBZ */
+       __le32 InputCount;
+       __le32 MaxInputResponse;
+       __le32 OutputOffset;
+       __le32 OutputCount;
+       __le32 MaxOutputResponse;
+       __le32 Flags;
+       __le32 Reserved2;
+       __u8   Buffer[];
+} __packed;
+
+struct smb2_ioctl_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 49 */
+       __le16 Reserved;
+       __le32 CtlCode;
+       __u64  PersistentFileId;
+       __u64  VolatileFileId;
+       __le32 InputOffset; /* Reserved MBZ */
+       __le32 InputCount;
+       __le32 OutputOffset;
+       __le32 OutputCount;
+       __le32 Flags;
+       __le32 Reserved2;
+       __u8   Buffer[];
+} __packed;
+
+/* this goes in the ioctl buffer when doing FSCTL_SET_ZERO_DATA */
+struct file_zero_data_information {
+       __le64  FileOffset;
+       __le64  BeyondFinalZero;
+} __packed;
+
+/* Reparse structures - see MS-FSCC 2.1.2 */
+
+/* struct fsctl_reparse_info_req is empty, only response structs (see below) */
+struct reparse_data_buffer {
+       __le32  ReparseTag;
+       __le16  ReparseDataLength;
+       __u16   Reserved;
+       __u8    DataBuffer[]; /* Variable Length */
+} __packed;
+
+struct reparse_guid_data_buffer {
+       __le32  ReparseTag;
+       __le16  ReparseDataLength;
+       __u16   Reserved;
+       __u8    ReparseGuid[16];
+       __u8    DataBuffer[]; /* Variable Length */
+} __packed;
+
+struct reparse_mount_point_data_buffer {
+       __le32  ReparseTag;
+       __le16  ReparseDataLength;
+       __u16   Reserved;
+       __le16  SubstituteNameOffset;
+       __le16  SubstituteNameLength;
+       __le16  PrintNameOffset;
+       __le16  PrintNameLength;
+       __u8    PathBuffer[]; /* Variable Length */
+} __packed;
+
+#define SYMLINK_FLAG_RELATIVE 0x00000001
+
+struct reparse_symlink_data_buffer {
+       __le32  ReparseTag;
+       __le16  ReparseDataLength;
+       __u16   Reserved;
+       __le16  SubstituteNameOffset;
+       __le16  SubstituteNameLength;
+       __le16  PrintNameOffset;
+       __le16  PrintNameLength;
+       __le32  Flags;
+       __u8    PathBuffer[]; /* Variable Length */
+} __packed;
+
+/* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */
+
+struct validate_negotiate_info_req {
+       __le32 Capabilities;
+       __u8   Guid[SMB2_CLIENT_GUID_SIZE];
+       __le16 SecurityMode;
+       __le16 DialectCount;
+       __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */
+} __packed;
+
+struct validate_negotiate_info_rsp {
+       __le32 Capabilities;
+       __u8   Guid[SMB2_CLIENT_GUID_SIZE];
+       __le16 SecurityMode;
+       __le16 Dialect; /* Dialect in use for the connection */
+} __packed;
+
+struct duplicate_extents_to_file {
+       __u64 PersistentFileHandle; /* source file handle, opaque endianness */
+       __u64 VolatileFileHandle;
+       __le64 SourceFileOffset;
+       __le64 TargetFileOffset;
+       __le64 ByteCount;  /* Bytes to be copied */
+} __packed;
+
+/* Possible InfoType values */
+#define SMB2_O_INFO_FILE       0x01
+#define SMB2_O_INFO_FILESYSTEM 0x02
+#define SMB2_O_INFO_SECURITY   0x03
+#define SMB2_O_INFO_QUOTA      0x04
+
+/* SMB2 Query Info see MS-SMB2 (2.2.37) or MS-DTYP */
+
+/* List of QUERY INFO levels (those also valid for QUERY_DIR are noted below) */
+#define FILE_DIRECTORY_INFORMATION     1       /* also for QUERY_DIR */
+#define FILE_FULL_DIRECTORY_INFORMATION 2      /* also for QUERY_DIR */
+#define FILE_BOTH_DIRECTORY_INFORMATION 3      /* also for QUERY_DIR */
+#define FILE_BASIC_INFORMATION         4
+#define FILE_STANDARD_INFORMATION      5
+#define FILE_INTERNAL_INFORMATION      6
+#define FILE_EA_INFORMATION            7
+#define FILE_ACCESS_INFORMATION                8
+#define FILE_NAME_INFORMATION          9
+#define FILE_RENAME_INFORMATION                10
+#define FILE_LINK_INFORMATION          11
+#define FILE_NAMES_INFORMATION         12      /* also for QUERY_DIR */
+#define FILE_DISPOSITION_INFORMATION   13
+#define FILE_POSITION_INFORMATION      14
+#define FILE_FULL_EA_INFORMATION       15
+#define FILE_MODE_INFORMATION          16
+#define FILE_ALIGNMENT_INFORMATION     17
+#define FILE_ALL_INFORMATION           18
+#define FILE_ALLOCATION_INFORMATION    19
+#define FILE_END_OF_FILE_INFORMATION   20
+#define FILE_ALTERNATE_NAME_INFORMATION 21
+#define FILE_STREAM_INFORMATION                22
+#define FILE_PIPE_INFORMATION          23
+#define FILE_PIPE_LOCAL_INFORMATION    24
+#define FILE_PIPE_REMOTE_INFORMATION   25
+#define FILE_MAILSLOT_QUERY_INFORMATION 26
+#define FILE_MAILSLOT_SET_INFORMATION  27
+#define FILE_COMPRESSION_INFORMATION   28
+#define FILE_OBJECT_ID_INFORMATION     29
+/* Number 30 not defined in documents */
+#define FILE_MOVE_CLUSTER_INFORMATION  31
+#define FILE_QUOTA_INFORMATION         32
+#define FILE_REPARSE_POINT_INFORMATION 33
+#define FILE_NETWORK_OPEN_INFORMATION  34
+#define FILE_ATTRIBUTE_TAG_INFORMATION 35
+#define FILE_TRACKING_INFORMATION      36
+#define FILEID_BOTH_DIRECTORY_INFORMATION 37   /* also for QUERY_DIR */
+#define FILEID_FULL_DIRECTORY_INFORMATION 38   /* also for QUERY_DIR */
+#define FILE_VALID_DATA_LENGTH_INFORMATION 39
+#define FILE_SHORT_NAME_INFORMATION    40
+#define FILE_SFIO_RESERVE_INFORMATION  44
+#define FILE_SFIO_VOLUME_INFORMATION   45
+#define FILE_HARD_LINK_INFORMATION     46
+#define FILE_NORMALIZED_NAME_INFORMATION 48
+#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50
+#define FILE_STANDARD_LINK_INFORMATION 54
+#define FILE_ID_INFORMATION            59
+#define FILE_ID_EXTD_DIRECTORY_INFORMATION 60  /* also for QUERY_DIR */
+/* Used for Query Info and Find File POSIX Info for SMB3.1.1 and SMB1 */
+#define SMB_FIND_FILE_POSIX_INFO       0x064
+
+/* Security info type additionalinfo flags. */
+#define OWNER_SECINFO   0x00000001
+#define GROUP_SECINFO   0x00000002
+#define DACL_SECINFO   0x00000004
+#define SACL_SECINFO   0x00000008
+#define LABEL_SECINFO   0x00000010
+#define ATTRIBUTE_SECINFO   0x00000020
+#define SCOPE_SECINFO   0x00000040
+#define BACKUP_SECINFO   0x00010000
+#define UNPROTECTED_SACL_SECINFO   0x10000000
+#define UNPROTECTED_DACL_SECINFO   0x20000000
+#define PROTECTED_SACL_SECINFO   0x40000000
+#define PROTECTED_DACL_SECINFO   0x80000000
+
+/* Flags used for FileFullEAinfo */
+#define SL_RESTART_SCAN                0x00000001
+#define SL_RETURN_SINGLE_ENTRY 0x00000002
+#define SL_INDEX_SPECIFIED     0x00000004
+
+struct smb2_query_info_req {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 41 */
+       __u8   InfoType;
+       __u8   FileInfoClass;
+       __le32 OutputBufferLength;
+       __le16 InputBufferOffset;
+       __u16  Reserved;
+       __le32 InputBufferLength;
+       __le32 AdditionalInformation;
+       __le32 Flags;
+       __u64  PersistentFileId;
+       __u64  VolatileFileId;
+       __u8   Buffer[1];
+} __packed;
+
+struct smb2_query_info_rsp {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 9 */
+       __le16 OutputBufferOffset;
+       __le32 OutputBufferLength;
+       __u8   Buffer[1];
+} __packed;
+
+/*
+ *     PDU query infolevel structure definitions
+ */
+
+struct file_allocated_range_buffer {
+       __le64  file_offset;
+       __le64  length;
+} __packed;
+
+struct smb2_file_internal_info {
+       __le64 IndexNumber;
+} __packed; /* level 6 Query */
+
+struct smb2_file_rename_info { /* encoding of request for level 10 */
+       __u8   ReplaceIfExists; /* 1 = replace existing target with new */
+                               /* 0 = fail if target already exists */
+       __u8   Reserved[7];
+       __u64  RootDirectory;  /* MBZ for network operations (why says spec?) */
+       __le32 FileNameLength;
+       char   FileName[];     /* New name to be assigned */
+       /* padding - overall struct size must be >= 24 so filename + pad >= 6 */
+} __packed; /* level 10 Set */
+
+struct smb2_file_link_info { /* encoding of request for level 11 */
+       __u8   ReplaceIfExists; /* 1 = replace existing link with new */
+                               /* 0 = fail if link already exists */
+       __u8   Reserved[7];
+       __u64  RootDirectory;  /* MBZ for network operations (why says spec?) */
+       __le32 FileNameLength;
+       char   FileName[];     /* Name to be assigned to new link */
+} __packed; /* level 11 Set */
+
+/*
+ * This level 18, although with struct with same name is different from cifs
+ * level 0x107. Level 0x107 has an extra u64 between AccessFlags and
+ * CurrentByteOffset.
+ */
+struct smb2_file_all_info { /* data block encoding of response to level 18 */
+       __le64 CreationTime;    /* Beginning of FILE_BASIC_INFO equivalent */
+       __le64 LastAccessTime;
+       __le64 LastWriteTime;
+       __le64 ChangeTime;
+       __le32 Attributes;
+       __u32  Pad1;            /* End of FILE_BASIC_INFO equivalent */
+       __le64 AllocationSize;  /* Beginning of FILE_STANDARD_INFO equivalent */
+       __le64 EndOfFile;       /* size ie offset to first free byte in file */
+       __le32 NumberOfLinks;   /* hard links */
+       __u8   DeletePending;
+       __u8   Directory;
+       __u16  Pad2;            /* End of FILE_STANDARD_INFO equivalent */
+       __le64 IndexNumber;
+       __le32 EASize;
+       __le32 AccessFlags;
+       __le64 CurrentByteOffset;
+       __le32 Mode;
+       __le32 AlignmentRequirement;
+       __le32 FileNameLength;
+       char   FileName[1];
+} __packed; /* level 18 Query */
+
+struct smb2_file_eof_info { /* encoding of request for level 20 */
+       __le64 EndOfFile; /* new end of file value */
+} __packed; /* level 20 Set */
+
+/* Level 100 query info */
+struct smb311_posix_qinfo {
+       __le64 CreationTime;
+       __le64 LastAccessTime;
+       __le64 LastWriteTime;
+       __le64 ChangeTime;
+       __le64 EndOfFile;
+       __le64 AllocationSize;
+       __le32 DosAttributes;
+       __le64 Inode;
+       __le32 DeviceId;
+       __le32 Zero;
+       /* beginning of POSIX Create Context Response */
+       __le32 HardLinks;
+       __le32 ReparseTag;
+       __le32 Mode;
+       u8     Sids[];
+       /*
+        * var sized owner SID
+        * var sized group SID
+        * le32 filenamelength
+        * u8  filename[]
+        */
+} __packed;
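
Because the tail of this structure is variable-sized, a consumer has to walk
past both SIDs to reach the file name. A hedged sketch, assuming
get_unaligned_le32() from <asm/unaligned.h>; sid_wire_size() is a
hypothetical helper based on the MS-DTYP SID layout (8 fixed bytes plus 4 per
sub-authority):

/* Hypothetical: wire size of a SID; byte 1 is SubAuthorityCount (MS-DTYP). */
static u32 sid_wire_size(const u8 *sid)
{
        return 8 + 4 * sid[1];
}

static const u8 *posix_qinfo_filename(const struct smb311_posix_qinfo *qi,
                                      u32 *name_len)
{
        const u8 *p = qi->Sids;

        p += sid_wire_size(p);          /* variable-sized owner SID */
        p += sid_wire_size(p);          /* variable-sized group SID */
        *name_len = get_unaligned_le32(p);
        return p + 4;                   /* name bytes follow the le32 length */
}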
+
+/* File System Information Classes */
+#define FS_VOLUME_INFORMATION          1 /* Query */
+#define FS_LABEL_INFORMATION           2 /* Set */
+#define FS_SIZE_INFORMATION            3 /* Query */
+#define FS_DEVICE_INFORMATION          4 /* Query */
+#define FS_ATTRIBUTE_INFORMATION       5 /* Query */
+#define FS_CONTROL_INFORMATION         6 /* Query, Set */
+#define FS_FULL_SIZE_INFORMATION       7 /* Query */
+#define FS_OBJECT_ID_INFORMATION       8 /* Query, Set */
+#define FS_DRIVER_PATH_INFORMATION     9 /* Query */
+#define FS_SECTOR_SIZE_INFORMATION     11 /* SMB3 or later. Query */
+#define FS_POSIX_INFORMATION           100 /* SMB3.1.1 POSIX. Query */
+
+struct smb2_fs_full_size_info {
+       __le64 TotalAllocationUnits;
+       __le64 CallerAvailableAllocationUnits;
+       __le64 ActualAvailableAllocationUnits;
+       __le32 SectorsPerAllocationUnit;
+       __le32 BytesPerSector;
+} __packed;
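
As a usage note, turning this reply into byte counts is a straight
multiplication; all fields are little-endian on the wire. A minimal sketch:

static u64 fs_full_size_free_bytes(const struct smb2_fs_full_size_info *fs)
{
        u64 unit_bytes = (u64)le32_to_cpu(fs->SectorsPerAllocationUnit) *
                         le32_to_cpu(fs->BytesPerSector);

        /* allocation units available to the caller, honoring quotas */
        return le64_to_cpu(fs->CallerAvailableAllocationUnits) * unit_bytes;
}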
+
+#define SSINFO_FLAGS_ALIGNED_DEVICE            0x00000001
+#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002
+#define SSINFO_FLAGS_NO_SEEK_PENALTY           0x00000004
+#define SSINFO_FLAGS_TRIM_ENABLED              0x00000008
+
+/* sector size info struct */
+struct smb3_fs_ss_info {
+       __le32 LogicalBytesPerSector;
+       __le32 PhysicalBytesPerSectorForAtomicity;
+       __le32 PhysicalBytesPerSectorForPerf;
+       __le32 FSEffPhysicalBytesPerSectorForAtomicity;
+       __le32 Flags;
+       __le32 ByteOffsetForSectorAlignment;
+       __le32 ByteOffsetForPartitionAlignment;
+} __packed;
+
+/* File System Control Information */
+struct smb2_fs_control_info {
+       __le64 FreeSpaceStartFiltering;
+       __le64 FreeSpaceThreshold;
+       __le64 FreeSpaceStopFiltering;
+       __le64 DefaultQuotaThreshold;
+       __le64 DefaultQuotaLimit;
+       __le32 FileSystemControlFlags;
+       __le32 Padding;
+} __packed;
+
+/* volume info struct - see MS-FSCC 2.5.9 */
+#define MAX_VOL_LABEL_LEN      32
+struct smb3_fs_vol_info {
+       __le64  VolumeCreationTime;
+       __u32   VolumeSerialNumber;
+       __le32  VolumeLabelLength; /* includes trailing null */
+       __u8    SupportsObjects; /* True if the fs, e.g. NTFS, supports objects */
+       __u8    Reserved;
+       __u8    VolumeLabel[]; /* variable len */
+} __packed;
+
+/* See MS-SMB2 2.2.23 through 2.2.25 */
+struct smb2_oplock_break {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 24 */
+       __u8   OplockLevel;
+       __u8   Reserved;
+       __le32 Reserved2;
+       __u64  PersistentFid;
+       __u64  VolatileFid;
+} __packed;
+
+#define SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED cpu_to_le32(0x01)
+
+struct smb2_lease_break {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 44 */
+       __le16 Epoch;
+       __le32 Flags;
+       __u8   LeaseKey[16];
+       __le32 CurrentLeaseState;
+       __le32 NewLeaseState;
+       __le32 BreakReason;
+       __le32 AccessMaskHint;
+       __le32 ShareMaskHint;
+} __packed;
+
+struct smb2_lease_ack {
+       struct smb2_hdr hdr;
+       __le16 StructureSize; /* Must be 36 */
+       __le16 Reserved;
+       __le32 Flags;
+       __u8   LeaseKey[16];
+       __le32 LeaseState;
+       __le64 LeaseDuration;
+} __packed;
 
+#define OP_BREAK_STRUCT_SIZE_20                24
+#define OP_BREAK_STRUCT_SIZE_21                36
 #endif                         /* _COMMON_SMB2PDU_H */
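
To illustrate the handshake the two lease structures above support: per
MS-SMB2 2.2.24, a client acknowledges a lease break by echoing the lease key
and the state it is transitioning to. A hedged sketch; the zeroing policy for
the reserved fields is taken from the spec, not from this header:

static void fill_lease_break_ack(struct smb2_lease_ack *ack,
                                 const struct smb2_lease_break *brk)
{
        ack->StructureSize = cpu_to_le16(36);   /* fixed, per comment above */
        ack->Reserved = 0;
        ack->Flags = 0;
        memcpy(ack->LeaseKey, brk->LeaseKey, sizeof(ack->LeaseKey));
        ack->LeaseState = brk->NewLeaseState;   /* both already little-endian */
        ack->LeaseDuration = 0;                 /* reserved, must be zero */
}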
index 7f734be0e57ec9f5d69927a672d7e4b20a4b006e..5c2c94464e8b0242430dc050f96678c35511509d 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -348,9 +348,6 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat
 #  define choose_32_64(a,b) b
 #endif
 
-#define valid_dev(x)  choose_32_64(old_valid_dev(x),true)
-#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
-
 #ifndef INIT_STRUCT_STAT_PADDING
 #  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
 #endif
@@ -359,7 +356,9 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
 {
        struct stat tmp;
 
-       if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
+       if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
+               return -EOVERFLOW;
+       if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
                return -EOVERFLOW;
 #if BITS_PER_LONG == 32
        if (stat->size > MAX_NON_LFS)
@@ -367,7 +366,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
 #endif
 
        INIT_STRUCT_STAT_PADDING(tmp);
-       tmp.st_dev = encode_dev(stat->dev);
+       tmp.st_dev = new_encode_dev(stat->dev);
        tmp.st_ino = stat->ino;
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
@@ -377,7 +376,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
                return -EOVERFLOW;
        SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
        SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
-       tmp.st_rdev = encode_dev(stat->rdev);
+       tmp.st_rdev = new_encode_dev(stat->rdev);
        tmp.st_size = stat->size;
        tmp.st_atime = stat->atime.tv_sec;
        tmp.st_mtime = stat->mtime.tv_sec;
@@ -665,11 +664,13 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
 {
        struct compat_stat tmp;
 
-       if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
+       if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
+               return -EOVERFLOW;
+       if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
                return -EOVERFLOW;
 
        memset(&tmp, 0, sizeof(tmp));
-       tmp.st_dev = old_encode_dev(stat->dev);
+       tmp.st_dev = new_encode_dev(stat->dev);
        tmp.st_ino = stat->ino;
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
@@ -679,7 +680,7 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
                return -EOVERFLOW;
        SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
        SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
-       tmp.st_rdev = old_encode_dev(stat->rdev);
+       tmp.st_rdev = new_encode_dev(stat->rdev);
        if ((u64) stat->size > MAX_NON_LFS)
                return -EOVERFLOW;
        tmp.st_size = stat->size;
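
The rationale for the change above: new_encode_dev() packs any dev_t
losslessly into 32 bits, so EOVERFLOW is only possible when the stat field
itself is narrower than 32 bits; the old 16-bit encoding additionally
required 8-bit major/minor numbers. Roughly, the helpers involved look like
this (a simplified sketch of include/linux/kdev_t.h, not the exact source):

/* old 16-bit encoding only represents 8-bit major/minor numbers */
static inline bool old_valid_dev_sketch(u32 major, u32 minor)
{
        return major < 256 && minor < 256;
}

/* new 32-bit encoding: 12-bit major, 20-bit minor, split for compatibility */
static inline u32 new_encode_dev_sketch(u32 major, u32 minor)
{
        return (minor & 0xff) | (major << 8) | ((minor & ~0xffU) << 12);
}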
index 42dcf96881b688f3474a76bb6b618eade3c2b959..a12ac0356c69cd409a856b5a551ce1768cf7ad16 100644 (file)
@@ -703,19 +703,6 @@ int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid)
 
        ktype = get_ktype(kobj);
        if (ktype) {
-               struct attribute **kattr;
-
-               /*
-                * Change owner of the default attributes associated with the
-                * ktype of @kobj.
-                */
-               for (kattr = ktype->default_attrs; kattr && *kattr; kattr++) {
-                       error = sysfs_file_change_owner(kobj, (*kattr)->name,
-                                                       kuid, kgid);
-                       if (error)
-                               return error;
-               }
-
                /*
                 * Change owner of the default groups associated with the
                 * ktype of @kobj.
index dbe72f664abf32f8eb945a79da42d10255e3d392..86151889548e3af45a4efc695ab5a6c68d985190 100644 (file)
@@ -349,20 +349,97 @@ out_budg:
        return err;
 }
 
-static int do_tmpfile(struct inode *dir, struct dentry *dentry,
-                     umode_t mode, struct inode **whiteout)
+static struct inode *create_whiteout(struct inode *dir, struct dentry *dentry)
+{
+       int err;
+       umode_t mode = S_IFCHR | WHITEOUT_MODE;
+       struct inode *inode;
+       struct ubifs_info *c = dir->i_sb->s_fs_info;
+       struct fscrypt_name nm;
+
+       /*
+        * Create an inode ('nlink = 1') for the whiteout without updating the
+        * journal; let ubifs_jnl_rename() store it on flash to complete the
+        * rename whiteout atomically.
+        */
+
+       dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
+               dentry, mode, dir->i_ino);
+
+       err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
+       if (err)
+               return ERR_PTR(err);
+
+       inode = ubifs_new_inode(c, dir, mode);
+       if (IS_ERR(inode)) {
+               err = PTR_ERR(inode);
+               goto out_free;
+       }
+
+       init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
+       ubifs_assert(c, inode->i_op == &ubifs_file_inode_operations);
+
+       err = ubifs_init_security(dir, inode, &dentry->d_name);
+       if (err)
+               goto out_inode;
+
+       /* The dir size is updated by do_rename. */
+       insert_inode_hash(inode);
+
+       return inode;
+
+out_inode:
+       make_bad_inode(inode);
+       iput(inode);
+out_free:
+       fscrypt_free_filename(&nm);
+       ubifs_err(c, "cannot create whiteout file, error %d", err);
+       return ERR_PTR(err);
+}
+
+/**
+ * lock_2_inodes - a wrapper for locking two UBIFS inodes.
+ * @inode1: first inode
+ * @inode2: second inode
+ *
+ * We do not implement any tricks to guarantee strict lock ordering, because
+ * VFS has already done it for us on the @i_mutex. So this is just a simple
+ * wrapper function.
+ */
+static void lock_2_inodes(struct inode *inode1, struct inode *inode2)
+{
+       mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1);
+       mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2);
+}
+
+/**
+ * unlock_2_inodes - a wrapper for unlocking two UBIFS inodes.
+ * @inode1: first inode
+ * @inode2: second inode
+ */
+static void unlock_2_inodes(struct inode *inode1, struct inode *inode2)
+{
+       mutex_unlock(&ubifs_inode(inode2)->ui_mutex);
+       mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
+}
+
+static int ubifs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+                        struct dentry *dentry, umode_t mode)
 {
        struct inode *inode;
        struct ubifs_info *c = dir->i_sb->s_fs_info;
-       struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1};
+       struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
+                                       .dirtied_ino = 1};
        struct ubifs_budget_req ino_req = { .dirtied_ino = 1 };
-       struct ubifs_inode *ui, *dir_ui = ubifs_inode(dir);
+       struct ubifs_inode *ui;
        int err, instantiated = 0;
        struct fscrypt_name nm;
 
        /*
-        * Budget request settings: new dirty inode, new direntry,
-        * budget for dirtied inode will be released via writeback.
+        * Budget request settings: new inode, new direntry, changing the
+        * parent directory inode.
+        * Allocate the budget for the new dirtied inode separately; that
+        * budget will be released via writeback.
         */
 
        dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
@@ -392,42 +469,30 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
        }
        ui = ubifs_inode(inode);
 
-       if (whiteout) {
-               init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
-               ubifs_assert(c, inode->i_op == &ubifs_file_inode_operations);
-       }
-
        err = ubifs_init_security(dir, inode, &dentry->d_name);
        if (err)
                goto out_inode;
 
        mutex_lock(&ui->ui_mutex);
        insert_inode_hash(inode);
-
-       if (whiteout) {
-               mark_inode_dirty(inode);
-               drop_nlink(inode);
-               *whiteout = inode;
-       } else {
-               d_tmpfile(dentry, inode);
-       }
+       d_tmpfile(dentry, inode);
        ubifs_assert(c, ui->dirty);
 
        instantiated = 1;
        mutex_unlock(&ui->ui_mutex);
 
-       mutex_lock(&dir_ui->ui_mutex);
+       lock_2_inodes(dir, inode);
        err = ubifs_jnl_update(c, dir, &nm, inode, 1, 0);
        if (err)
                goto out_cancel;
-       mutex_unlock(&dir_ui->ui_mutex);
+       unlock_2_inodes(dir, inode);
 
        ubifs_release_budget(c, &req);
 
        return 0;
 
 out_cancel:
-       mutex_unlock(&dir_ui->ui_mutex);
+       unlock_2_inodes(dir, inode);
 out_inode:
        make_bad_inode(inode);
        if (!instantiated)
@@ -441,12 +506,6 @@ out_budg:
        return err;
 }
 
-static int ubifs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
-                        struct dentry *dentry, umode_t mode)
-{
-       return do_tmpfile(dir, dentry, mode, NULL);
-}
-
 /**
  * vfs_dent_type - get VFS directory entry type.
  * @type: UBIFS directory entry type
@@ -660,32 +719,6 @@ static int ubifs_dir_release(struct inode *dir, struct file *file)
        return 0;
 }
 
-/**
- * lock_2_inodes - a wrapper for locking two UBIFS inodes.
- * @inode1: first inode
- * @inode2: second inode
- *
- * We do not implement any tricks to guarantee strict lock ordering, because
- * VFS has already done it for us on the @i_mutex. So this is just a simple
- * wrapper function.
- */
-static void lock_2_inodes(struct inode *inode1, struct inode *inode2)
-{
-       mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1);
-       mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2);
-}
-
-/**
- * unlock_2_inodes - a wrapper for unlocking two UBIFS inodes.
- * @inode1: first inode
- * @inode2: second inode
- */
-static void unlock_2_inodes(struct inode *inode1, struct inode *inode2)
-{
-       mutex_unlock(&ubifs_inode(inode2)->ui_mutex);
-       mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
-}
-
 static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
                      struct dentry *dentry)
 {
@@ -949,7 +982,8 @@ static int ubifs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
        struct ubifs_inode *dir_ui = ubifs_inode(dir);
        struct ubifs_info *c = dir->i_sb->s_fs_info;
        int err, sz_change;
-       struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1 };
+       struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
+                                       .dirtied_ino = 1};
        struct fscrypt_name nm;
 
        /*
@@ -1264,17 +1298,19 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
                                        .dirtied_ino = 3 };
        struct ubifs_budget_req ino_req = { .dirtied_ino = 1,
                        .dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) };
+       struct ubifs_budget_req wht_req;
        struct timespec64 time;
        unsigned int saved_nlink;
        struct fscrypt_name old_nm, new_nm;
 
        /*
-        * Budget request settings: deletion direntry, new direntry, removing
-        * the old inode, and changing old and new parent directory inodes.
+        * Budget request settings:
+        *   req: deletion direntry, new direntry, removing the old inode,
+        *   and changing old and new parent directory inodes.
+        *
+        *   wht_req: new whiteout inode for RENAME_WHITEOUT.
         *
-        * However, this operation also marks the target inode as dirty and
-        * does not write it, so we allocate budget for the target inode
-        * separately.
+        *   ino_req: marks the target inode as dirty and does not write it.
         */
 
        dbg_gen("dent '%pd' ino %lu in dir ino %lu to dent '%pd' in dir ino %lu flags 0x%x",
@@ -1331,20 +1367,44 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
                        goto out_release;
                }
 
-               err = do_tmpfile(old_dir, old_dentry, S_IFCHR | WHITEOUT_MODE, &whiteout);
-               if (err) {
+               /*
+                * The whiteout inode has no dentry and is pinned in memory;
+                * umount cannot happen during the rename because we hold the
+                * parent dentry.
+                */
+               whiteout = create_whiteout(old_dir, old_dentry);
+               if (IS_ERR(whiteout)) {
+                       err = PTR_ERR(whiteout);
                        kfree(dev);
                        goto out_release;
                }
 
-               spin_lock(&whiteout->i_lock);
-               whiteout->i_state |= I_LINKABLE;
-               spin_unlock(&whiteout->i_lock);
-
                whiteout_ui = ubifs_inode(whiteout);
                whiteout_ui->data = dev;
                whiteout_ui->data_len = ubifs_encode_dev(dev, MKDEV(0, 0));
                ubifs_assert(c, !whiteout_ui->dirty);
+
+               memset(&wht_req, 0, sizeof(struct ubifs_budget_req));
+               wht_req.new_ino = 1;
+               wht_req.new_ino_d = ALIGN(whiteout_ui->data_len, 8);
+               /*
+                * To avoid a deadlock between space budgeting (which holds
+                * ui_mutex and waits for writeback work) and the writeback
+                * work itself (which waits on ui_mutex), budget the space
+                * before the ubifs inodes are locked.
+                */
+               err = ubifs_budget_space(c, &wht_req);
+               if (err) {
+                       /*
+                        * The whiteout inode cannot be written to flash by
+                        * ubifs_jnl_write_inode(), because it is neither
+                        * dirty nor zero-nlink.
+                        */
+                       iput(whiteout);
+                       goto out_release;
+               }
+
+               /* Add the old_dentry size to the old_dir size. */
+               old_sz -= CALC_DENT_SIZE(fname_len(&old_nm));
        }
 
        lock_4_inodes(old_dir, new_dir, new_inode, whiteout);
@@ -1416,29 +1476,11 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
                sync = IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir);
                if (unlink && IS_SYNC(new_inode))
                        sync = 1;
-       }
-
-       if (whiteout) {
-               struct ubifs_budget_req wht_req = { .dirtied_ino = 1,
-                               .dirtied_ino_d = \
-                               ALIGN(ubifs_inode(whiteout)->data_len, 8) };
-
-               err = ubifs_budget_space(c, &wht_req);
-               if (err) {
-                       kfree(whiteout_ui->data);
-                       whiteout_ui->data_len = 0;
-                       iput(whiteout);
-                       goto out_release;
-               }
-
-               inc_nlink(whiteout);
-               mark_inode_dirty(whiteout);
-
-               spin_lock(&whiteout->i_lock);
-               whiteout->i_state &= ~I_LINKABLE;
-               spin_unlock(&whiteout->i_lock);
-
-               iput(whiteout);
+               /*
+                * The whiteout's S_SYNC flag is inherited from old_dir, and we
+                * have already checked the old dir inode, so there is no need
+                * to check the whiteout.
+                */
        }
 
        err = ubifs_jnl_rename(c, old_dir, old_inode, &old_nm, new_dir,
@@ -1449,6 +1491,11 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
        unlock_4_inodes(old_dir, new_dir, new_inode, whiteout);
        ubifs_release_budget(c, &req);
 
+       if (whiteout) {
+               ubifs_release_budget(c, &wht_req);
+               iput(whiteout);
+       }
+
        mutex_lock(&old_inode_ui->ui_mutex);
        release = old_inode_ui->dirty;
        mark_inode_dirty_sync(old_inode);
@@ -1457,11 +1504,16 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (release)
                ubifs_release_budget(c, &ino_req);
        if (IS_SYNC(old_inode))
-               err = old_inode->i_sb->s_op->write_inode(old_inode, NULL);
+               /*
+                * The rename has finished here. Even if the old inode cannot
+                * be updated on flash, a stale ctime is not a big problem;
+                * don't return an error code to userspace.
+                */
+               old_inode->i_sb->s_op->write_inode(old_inode, NULL);
 
        fscrypt_free_filename(&old_nm);
        fscrypt_free_filename(&new_nm);
-       return err;
+       return 0;
 
 out_cancel:
        if (unlink) {
@@ -1482,11 +1534,11 @@ out_cancel:
                                inc_nlink(old_dir);
                }
        }
+       unlock_4_inodes(old_dir, new_dir, new_inode, whiteout);
        if (whiteout) {
-               drop_nlink(whiteout);
+               ubifs_release_budget(c, &wht_req);
                iput(whiteout);
        }
-       unlock_4_inodes(old_dir, new_dir, new_inode, whiteout);
 out_release:
        ubifs_release_budget(c, &ino_req);
        ubifs_release_budget(c, &req);
index 8a9ffc2d4167aeaceebd0f2b48fda352aa1bd3fc..0383fbdc95ff1d2f199897221215db7a0f2ef97d 100644 (file)
@@ -570,7 +570,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
        }
 
        if (!PagePrivate(page)) {
-               SetPagePrivate(page);
+               attach_page_private(page, (void *)1);
                atomic_long_inc(&c->dirty_pg_cnt);
                __set_page_dirty_nobuffers(page);
        }
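
The switch away from bare SetPagePrivate() matters because the attach/detach
helpers also manage the page reference that PG_private implies. Roughly, per
include/linux/pagemap.h of this era (simplified sketch):

static inline void attach_page_private(struct page *page, void *data)
{
        get_page(page);                 /* PG_private implies an extra ref */
        set_page_private(page, (unsigned long)data);
        SetPagePrivate(page);
}

static inline void *detach_page_private(struct page *page)
{
        void *data = (void *)page_private(page);

        if (!PagePrivate(page))
                return NULL;
        ClearPagePrivate(page);
        set_page_private(page, 0);
        put_page(page);                 /* drop the PG_private reference */
        return data;
}

UBIFS only needs the flag, not a private pointer, hence the dummy (void *)1
cookie in the calls above.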
@@ -947,7 +947,7 @@ static int do_writepage(struct page *page, int len)
                release_existing_page_budget(c);
 
        atomic_long_dec(&c->dirty_pg_cnt);
-       ClearPagePrivate(page);
+       detach_page_private(page);
        ClearPageChecked(page);
 
        kunmap(page);
@@ -1304,7 +1304,7 @@ static void ubifs_invalidate_folio(struct folio *folio, size_t offset,
                release_existing_page_budget(c);
 
        atomic_long_dec(&c->dirty_pg_cnt);
-       folio_clear_private(folio);
+       folio_detach_private(folio);
        folio_clear_checked(folio);
 }
 
@@ -1471,8 +1471,8 @@ static int ubifs_migrate_page(struct address_space *mapping,
                return rc;
 
        if (PagePrivate(page)) {
-               ClearPagePrivate(page);
-               SetPagePrivate(newpage);
+               detach_page_private(page);
+               attach_page_private(newpage, (void *)1);
        }
 
        if (mode != MIGRATE_SYNC_NO_COPY)
@@ -1496,7 +1496,7 @@ static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
                return 0;
        ubifs_assert(c, PagePrivate(page));
        ubifs_assert(c, 0);
-       ClearPagePrivate(page);
+       detach_page_private(page);
        ClearPageChecked(page);
        return 1;
 }
@@ -1567,7 +1567,7 @@ static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
        else {
                if (!PageChecked(page))
                        ubifs_convert_page_budget(c);
-               SetPagePrivate(page);
+               attach_page_private(page, (void *)1);
                atomic_long_inc(&c->dirty_pg_cnt);
                __set_page_dirty_nobuffers(page);
        }
index 789a7813f3fa29d9e48ee35029261538879e5797..1607a3c76681a287e1c9c81f79ab06f19f96efc5 100644 (file)
@@ -854,16 +854,42 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
         */
        n = aligned_len >> c->max_write_shift;
        if (n) {
-               n <<= c->max_write_shift;
+               int m = n - 1;
+
                dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
                       wbuf->offs);
-               err = ubifs_leb_write(c, wbuf->lnum, buf + written,
-                                     wbuf->offs, n);
+
+               if (m) {
+                       /* '(n-1)<<c->max_write_shift < len' is always true. */
+                       m <<= c->max_write_shift;
+                       err = ubifs_leb_write(c, wbuf->lnum, buf + written,
+                                             wbuf->offs, m);
+                       if (err)
+                               goto out;
+                       wbuf->offs += m;
+                       aligned_len -= m;
+                       len -= m;
+                       written += m;
+               }
+
+               /*
+                * The unwritten length of buf may be less than 'n' because
+                * parameter 'len' is not 8-byte aligned, so copy
+                * min(len, n) bytes from buf here.
+                */
+               n = 1 << c->max_write_shift;
+               memcpy(wbuf->buf, buf + written, min(len, n));
+               if (n > len) {
+                       ubifs_assert(c, n - len < 8);
+                       ubifs_pad(c, wbuf->buf + len, n - len);
+               }
+
+               err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, n);
                if (err)
                        goto out;
                wbuf->offs += n;
                aligned_len -= n;
-               len -= n;
+               len -= min(len, n);
                written += n;
        }
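
To make the split concrete, a worked example under an assumed geometry:
c->max_write_shift = 6 (a 64-byte max write unit), len = 125,
aligned_len = 128, written = 0 on entry to this block:

        n = 128 >> 6 = 2, so m = n - 1 = 1
        direct write:  m << 6 = 64 bytes straight from 'buf'
                       (len 125 -> 61, written 0 -> 64)
        final unit:    memcpy(wbuf->buf, buf + 64, min(61, 64) = 61),
                       pad 64 - 61 = 3 bytes (< 8, as the assert requires),
                       then write the full 64-byte unit from wbuf->buf

Only the last max-write unit is staged through wbuf->buf, so the padding
never touches the caller's buffer; that is also why 'len -= min(len, n)' is
needed in place of the old 'len -= n'.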
 
index c6a86348778037fd0d06e0b10fe835f841988f93..71bcebe45f9c588a8cd30805d8ee091a85b6e536 100644 (file)
@@ -108,7 +108,7 @@ static int setflags(struct inode *inode, int flags)
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_budget_req req = { .dirtied_ino = 1,
-                                       .dirtied_ino_d = ui->data_len };
+                       .dirtied_ino_d = ALIGN(ui->data_len, 8) };
 
        err = ubifs_budget_space(c, &req);
        if (err)
index 8ea680dba61e39c4a41b821ae7b048c6a716ed98..75dab0ae3939d0a9011fad14242fe01b81380a52 100644 (file)
@@ -1207,9 +1207,9 @@ out_free:
  * @sync: non-zero if the write-buffer has to be synchronized
  *
  * This function implements the re-name operation which may involve writing up
- * to 4 inodes and 2 directory entries. It marks the written inodes as clean
- * and returns zero on success. In case of failure, a negative error code is
- * returned.
+ * to 4 inodes (new inode, whiteout inode, old and new parent directory inodes)
+ * and 2 directory entries. It marks the written inodes as clean and returns
+ * zero on success. In case of failure, a negative error code is returned.
  */
 int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                     const struct inode *old_inode,
@@ -1222,14 +1222,15 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
        void *p;
        union ubifs_key key;
        struct ubifs_dent_node *dent, *dent2;
-       int err, dlen1, dlen2, ilen, lnum, offs, len, orphan_added = 0;
+       int err, dlen1, dlen2, ilen, wlen, lnum, offs, len, orphan_added = 0;
        int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
        int last_reference = !!(new_inode && new_inode->i_nlink == 0);
        int move = (old_dir != new_dir);
-       struct ubifs_inode *new_ui;
+       struct ubifs_inode *new_ui, *whiteout_ui;
        u8 hash_old_dir[UBIFS_HASH_ARR_SZ];
        u8 hash_new_dir[UBIFS_HASH_ARR_SZ];
        u8 hash_new_inode[UBIFS_HASH_ARR_SZ];
+       u8 hash_whiteout_inode[UBIFS_HASH_ARR_SZ];
        u8 hash_dent1[UBIFS_HASH_ARR_SZ];
        u8 hash_dent2[UBIFS_HASH_ARR_SZ];
 
@@ -1249,9 +1250,20 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
        } else
                ilen = 0;
 
+       if (whiteout) {
+               whiteout_ui = ubifs_inode(whiteout);
+               ubifs_assert(c, mutex_is_locked(&whiteout_ui->ui_mutex));
+               ubifs_assert(c, whiteout->i_nlink == 1);
+               ubifs_assert(c, !whiteout_ui->dirty);
+               wlen = UBIFS_INO_NODE_SZ;
+               wlen += whiteout_ui->data_len;
+       } else
+               wlen = 0;
+
        aligned_dlen1 = ALIGN(dlen1, 8);
        aligned_dlen2 = ALIGN(dlen2, 8);
-       len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
+       len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) +
+             ALIGN(wlen, 8) + ALIGN(plen, 8);
        if (move)
                len += plen;
 
@@ -1313,6 +1325,15 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                p += ALIGN(ilen, 8);
        }
 
+       if (whiteout) {
+               pack_inode(c, p, whiteout, 0);
+               err = ubifs_node_calc_hash(c, p, hash_whiteout_inode);
+               if (err)
+                       goto out_release;
+
+               p += ALIGN(wlen, 8);
+       }
+
        if (!move) {
                pack_inode(c, p, old_dir, 1);
                err = ubifs_node_calc_hash(c, p, hash_old_dir);
@@ -1352,6 +1373,9 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                if (new_inode)
                        ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
                                                  new_inode->i_ino);
+               if (whiteout)
+                       ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
+                                                 whiteout->i_ino);
        }
        release_head(c, BASEHD);
 
@@ -1368,8 +1392,6 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, old_nm);
                if (err)
                        goto out_ro;
-
-               ubifs_delete_orphan(c, whiteout->i_ino);
        } else {
                err = ubifs_add_dirt(c, lnum, dlen2);
                if (err)
@@ -1390,6 +1412,15 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                offs += ALIGN(ilen, 8);
        }
 
+       if (whiteout) {
+               ino_key_init(c, &key, whiteout->i_ino);
+               err = ubifs_tnc_add(c, &key, lnum, offs, wlen,
+                                   hash_whiteout_inode);
+               if (err)
+                       goto out_ro;
+               offs += ALIGN(wlen, 8);
+       }
+
        ino_key_init(c, &key, old_dir->i_ino);
        err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_old_dir);
        if (err)
@@ -1410,6 +1441,11 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                new_ui->synced_i_size = new_ui->ui_size;
                spin_unlock(&new_ui->ui_lock);
        }
+       /*
+        * No need to mark the whiteout inode clean. The whiteout has zero
+        * size, so there is no need to update synced_i_size for whiteout_ui.
+        */
        mark_inode_clean(c, ubifs_inode(old_dir));
        if (move)
                mark_inode_clean(c, ubifs_inode(new_dir));
index f55828c0a3004f873f6d1796690e2e47a5726787..008fa46ef61e728c115979c7d8213de6c1ff33ba 100644 (file)
@@ -381,7 +381,7 @@ struct ubifs_gced_idx_leb {
  * @ui_mutex exists for two main reasons. At first it prevents inodes from
  * being written back while UBIFS is changing them, being in the middle of a VFS
  * operation. This way UBIFS makes sure the inode fields are consistent. For
- * example, in 'ubifs_rename()' we change 3 inodes simultaneously, and
+ * example, in 'ubifs_rename()' we change 4 inodes simultaneously, and
  * write-back must not write any of them before we have finished.
  *
  * The second reason is budgeting - UBIFS has to budget all operations. If an
index 0cc87423de828082c2320fe5e5e7d54969258a2b..0e51c0025a16fd753ba7a65c8131b928e9a8ee39 100644 (file)
@@ -33,7 +33,7 @@ $(obj)/utf8data.c: $(obj)/mkutf8data $(filter %.txt, $(cmd_utf8data)) FORCE
 else
 
 $(obj)/utf8data.c: $(src)/utf8data.c_shipped FORCE
-       $(call if_changed,shipped)
+       $(call if_changed,copy)
 
 endif
 
index 0adb970f4e7368a3c138edfe66a46a714d47b3d0..14e2fb49cff561a9a16ffa71539c0e340fe67e92 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Data verification functions, i.e. hooks for ->readpages()
+ * Data verification functions, i.e. hooks for ->readahead()
  *
  * Copyright 2019 Google LLC
  */
@@ -214,7 +214,7 @@ EXPORT_SYMBOL_GPL(fsverity_verify_page);
  * that fail verification are set to the Error state.  Verification is skipped
  * for pages already in the Error state, e.g. due to fscrypt decryption failure.
  *
- * This is a helper function for use by the ->readpages() method of filesystems
+ * This is a helper function for use by the ->readahead() method of filesystems
  * that issue bios to read data directly into the page cache.  Filesystems that
  * populate the page cache without issuing bios (e.g. non block-based
  * filesystems) must instead call fsverity_verify_page() directly on each page.
index 5c8c5175b385c274eaac9a239ba0749b9372ff98..998045165916edcd1f5d4215011dea8f8cb08d7b 100644 (file)
@@ -569,7 +569,8 @@ setxattr(struct user_namespace *mnt_userns, struct dentry *d,
                }
                if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
                    (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
-                       posix_acl_fix_xattr_from_user(mnt_userns, kvalue, size);
+                       posix_acl_fix_xattr_from_user(mnt_userns, d_inode(d),
+                                                     kvalue, size);
        }
 
        error = vfs_setxattr(mnt_userns, d, kname, kvalue, size, flags);
@@ -667,7 +668,8 @@ getxattr(struct user_namespace *mnt_userns, struct dentry *d,
        if (error > 0) {
                if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
                    (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
-                       posix_acl_fix_xattr_to_user(mnt_userns, kvalue, error);
+                       posix_acl_fix_xattr_to_user(mnt_userns, d_inode(d),
+                                                   kvalue, error);
                if (size && copy_to_user(value, kvalue, error))
                        error = -EFAULT;
        } else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
index 353e53b892e6eb2973e9c582d491a58e045f6eab..b52ed339727f0324fe18b2a345f53c958533a536 100644 (file)
@@ -82,6 +82,24 @@ xfs_prealloc_blocks(
 }
 
 /*
+ * The number of blocks per AG that we withhold from xfs_mod_fdblocks to
+ * guarantee that we can refill the AGFL prior to allocating space in a nearly
+ * full AG.  Although the space described by the free space btrees, the
+ * blocks used by the freesp btrees themselves, and the blocks owned by the
+ * AGFL are counted in the ondisk fdblocks, it's a mistake to let the ondisk
+ * free space in the AG drop so low that the free space btrees cannot refill an
+ * empty AGFL up to the minimum level.  Rather than grind through empty AGs
+ * until the fs goes down, we subtract this many AG blocks from the incore
+ * fdblocks to ensure user allocation does not overcommit the space the
+ * filesystem needs for the AGFLs.  The rmap btree uses a per-AG reservation to
+ * withhold space from xfs_mod_fdblocks, so we do not account for that here.
+ */
+#define XFS_ALLOCBT_AGFL_RESERVE       4
+
+/*
+ * Compute the number of blocks that we set aside to guarantee the ability to
+ * refill the AGFL and handle a full bmap btree split.
+ *
  * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
  * AGF buffer (PV 947395), we place constraints on the relationship among
  * actual allocations for data blocks, freelist blocks, and potential file data
@@ -93,14 +111,14 @@ xfs_prealloc_blocks(
  * extents need to be actually allocated. To get around this, we explicitly set
  * aside a few blocks which will not be reserved in delayed allocation.
  *
- * We need to reserve 4 fsbs _per AG_ for the freelist and 4 more to handle a
- * potential split of the file's bmap btree.
+ * For each AG, we need to reserve enough blocks to replenish a totally empty
+ * AGFL and 4 more to handle a potential split of the file's bmap btree.
  */
 unsigned int
 xfs_alloc_set_aside(
        struct xfs_mount        *mp)
 {
-       return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
+       return mp->m_sb.sb_agcount * (XFS_ALLOCBT_AGFL_RESERVE + 4);
 }
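
For scale, under an assumed geometry of sb_agcount = 4 this still sets aside
4 * (XFS_ALLOCBT_AGFL_RESERVE + 4) = 32 blocks: per AG, 4 blocks to guarantee
an AGFL refill plus 4 for a worst-case bmap btree split. The rename only
splits the AGFL reserve constant out of the (unchanged) arithmetic.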
 
 /*
@@ -124,12 +142,12 @@ xfs_alloc_ag_max_usable(
        unsigned int            blocks;
 
        blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
-       blocks += XFS_ALLOC_AGFL_RESERVE;
+       blocks += XFS_ALLOCBT_AGFL_RESERVE;
        blocks += 3;                    /* AGF, AGI btree root blocks */
        if (xfs_has_finobt(mp))
                blocks++;               /* finobt root block */
        if (xfs_has_rmapbt(mp))
-               blocks++;               /* rmap root block */
+               blocks++;               /* rmap root block */
        if (xfs_has_reflink(mp))
                blocks++;               /* refcount root block */
 
index 1c14a0b1abeafb32a4f5167a2759edd5cc54118a..d4c057b764f905cd52b0fc161ff0d8f37958424d 100644 (file)
@@ -88,7 +88,6 @@ typedef struct xfs_alloc_arg {
 #define XFS_ALLOC_NOBUSY               (1 << 2)/* Busy extents not allowed */
 
 /* freespace limit calculations */
-#define XFS_ALLOC_AGFL_RESERVE 4
 unsigned int xfs_alloc_set_aside(struct xfs_mount *mp);
 unsigned int xfs_alloc_ag_max_usable(struct xfs_mount *mp);
 
index 32fa02945f7399d30b367dac248bf4fb344582c6..ae4345b37621b600b8156cb5c10f7b2bac6a79da 100644 (file)
@@ -9,39 +9,6 @@ static inline unsigned int bio_max_vecs(unsigned int count)
        return bio_max_segs(howmany(count, PAGE_SIZE));
 }
 
-static void
-xfs_flush_bdev_async_endio(
-       struct bio      *bio)
-{
-       complete(bio->bi_private);
-}
-
-/*
- * Submit a request for an async cache flush to run. If the request queue does
- * not require flush operations, just skip it altogether. If the caller needs
- * to wait for the flush completion at a later point in time, they must supply a
- * valid completion. This will be signalled when the flush completes.  The
- * caller never sees the bio that is issued here.
- */
-void
-xfs_flush_bdev_async(
-       struct bio              *bio,
-       struct block_device     *bdev,
-       struct completion       *done)
-{
-       struct request_queue    *q = bdev->bd_disk->queue;
-
-       if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
-               complete(done);
-               return;
-       }
-
-       bio_init(bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
-       bio->bi_private = done;
-       bio->bi_end_io = xfs_flush_bdev_async_endio;
-
-       submit_bio(bio);
-}
 int
 xfs_rw_bdev(
        struct block_device     *bdev,
index 33e26690a8c4fc1d3722b2eabe51245cd87d6aa5..68f74549fa2280612c19f0e05873e9b8372f66e3 100644 (file)
@@ -17,6 +17,7 @@
 #include "xfs_fsops.h"
 #include "xfs_trans_space.h"
 #include "xfs_log.h"
+#include "xfs_log_priv.h"
 #include "xfs_ag.h"
 #include "xfs_ag_resv.h"
 #include "xfs_trace.h"
@@ -347,7 +348,7 @@ xfs_fs_counts(
        cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
        cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
        cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
-                                               mp->m_alloc_set_aside;
+                                               xfs_fdblocks_unavailable(mp);
 
        spin_lock(&mp->m_sb_lock);
        cnt->freertx = mp->m_sb.sb_frextents;
@@ -430,46 +431,36 @@ xfs_reserve_blocks(
         * If the request is larger than the current reservation, reserve the
         * blocks before we update the reserve counters. Sample m_fdblocks and
         * perform a partial reservation if the request exceeds free space.
+        *
+        * The code below estimates how many blocks it can request from
+        * fdblocks to stash in the reserve pool.  This is a classic TOCTOU
+        * race since fdblocks updates are not always coordinated via
+        * m_sb_lock.  Set the reserve size even if there's not enough free
+        * space to fill it because mod_fdblocks will refill an undersized
+        * reserve when it can.
         */
-       error = -ENOSPC;
-       do {
-               free = percpu_counter_sum(&mp->m_fdblocks) -
-                                               mp->m_alloc_set_aside;
-               if (free <= 0)
-                       break;
-
-               delta = request - mp->m_resblks;
-               lcounter = free - delta;
-               if (lcounter < 0)
-                       /* We can't satisfy the request, just get what we can */
-                       fdblks_delta = free;
-               else
-                       fdblks_delta = delta;
-
+       free = percpu_counter_sum(&mp->m_fdblocks) -
+                                               xfs_fdblocks_unavailable(mp);
+       delta = request - mp->m_resblks;
+       mp->m_resblks = request;
+       if (delta > 0 && free > 0) {
                /*
                 * We'll either succeed in getting space from the free block
-                * count or we'll get an ENOSPC. If we get a ENOSPC, it means
-                * things changed while we were calculating fdblks_delta and so
-                * we should try again to see if there is anything left to
-                * reserve.
+                * count or we'll get an ENOSPC.  Don't set the reserved flag
+                * here - we don't want to reserve the extra reserve blocks
+                * from the reserve.
                 *
-                * Don't set the reserved flag here - we don't want to reserve
-                * the extra reserve blocks from the reserve.....
+                * The desired reserve size can change after we drop the lock.
+                * Use mod_fdblocks to put the space into the reserve or into
+                * fdblocks as appropriate.
                 */
+               fdblks_delta = min(free, delta);
                spin_unlock(&mp->m_sb_lock);
                error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
+               if (!error)
+                       xfs_mod_fdblocks(mp, fdblks_delta, 0);
                spin_lock(&mp->m_sb_lock);
-       } while (error == -ENOSPC);
-
-       /*
-        * Update the reserve counters if blocks have been successfully
-        * allocated.
-        */
-       if (!error && fdblks_delta) {
-               mp->m_resblks += fdblks_delta;
-               mp->m_resblks_avail += fdblks_delta;
        }
-
 out:
        if (outval) {
                outval->resblks = mp->m_resblks;
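
The take-then-return pair of xfs_mod_fdblocks() calls works because the
positive-delta path routes returned blocks into an undersized reserve before
crediting fdblocks. A simplified sketch of that refill logic (abbreviated
from xfs_mod_fdblocks(), not the exact body):

if (delta > 0) {
        spin_lock(&mp->m_sb_lock);
        if (mp->m_resblks_avail < mp->m_resblks) {
                s64 fill = min_t(s64, delta,
                                 mp->m_resblks - mp->m_resblks_avail);

                mp->m_resblks_avail += fill;    /* refill the reserve pool */
                delta -= fill;                  /* remainder is truly free */
        }
        spin_unlock(&mp->m_sb_lock);
        percpu_counter_add(&mp->m_fdblocks, delta);
}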
@@ -528,8 +519,11 @@ xfs_do_force_shutdown(
        int             tag;
        const char      *why;
 
-       if (test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate))
+
+       if (test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate)) {
+               xlog_shutdown_wait(mp->m_log);
                return;
+       }
        if (mp->m_sb_bp)
                mp->m_sb_bp->b_flags |= XBF_DONE;
 
index 20186c584c7d6824c26500248ab554fc98257007..bffd6eb0b298eedb54f820014302dd1bd2bca765 100644 (file)
@@ -883,7 +883,7 @@ xfs_reclaim_inode(
         */
        if (xlog_is_shutdown(ip->i_mount->m_log)) {
                xfs_iunpin_wait(ip);
-               xfs_iflush_abort(ip);
+               xfs_iflush_shutdown_abort(ip);
                goto reclaim;
        }
        if (xfs_ipincount(ip))
index 26227d26f274fca1551b82816a35f9425ebfa4a8..9de6205fe134a926838384e1b5a952cbc8ce63f2 100644 (file)
@@ -3631,7 +3631,7 @@ xfs_iflush_cluster(
 
        /*
         * We must use the safe variant here as on shutdown xfs_iflush_abort()
-        * can remove itself from the list.
+        * will remove itself from the list.
         */
        list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
                iip = (struct xfs_inode_log_item *)lip;
index 11158fa81a093d0e5981b8839075e9e6fb23d4e1..9e6ef55cf29e15e00d49545422c45ae3889aea83 100644 (file)
@@ -544,10 +544,17 @@ xfs_inode_item_push(
        uint                    rval = XFS_ITEM_SUCCESS;
        int                     error;
 
-       ASSERT(iip->ili_item.li_buf);
+       if (!bp || (ip->i_flags & XFS_ISTALE)) {
+               /*
+                * Inode item/buffer is being aborted due to cluster
+                * buffer deletion. Trigger a log force to have that operation
+                * completed and items removed from the AIL before the next push
+                * attempt.
+                */
+               return XFS_ITEM_PINNED;
+       }
 
-       if (xfs_ipincount(ip) > 0 || xfs_buf_ispinned(bp) ||
-           (ip->i_flags & XFS_ISTALE))
+       if (xfs_ipincount(ip) > 0 || xfs_buf_ispinned(bp))
                return XFS_ITEM_PINNED;
 
        if (xfs_iflags_test(ip, XFS_IFLUSHING))
@@ -834,46 +841,143 @@ xfs_buf_inode_io_fail(
 }
 
 /*
- * This is the inode flushing abort routine.  It is called when
- * the filesystem is shutting down to clean up the inode state.  It is
- * responsible for removing the inode item from the AIL if it has not been
- * re-logged and clearing the inode's flush state.
+ * Clear the inode logging fields so no more flushes are attempted.  If we are
+ * on a buffer list, it is now safe to remove it because the buffer is
+ * guaranteed to be locked. The caller will drop the reference to the buffer
+ * the log item held.
+ */
+static void
+xfs_iflush_abort_clean(
+       struct xfs_inode_log_item *iip)
+{
+       iip->ili_last_fields = 0;
+       iip->ili_fields = 0;
+       iip->ili_fsync_fields = 0;
+       iip->ili_flush_lsn = 0;
+       iip->ili_item.li_buf = NULL;
+       list_del_init(&iip->ili_item.li_bio_list);
+}
+
+/*
+ * Abort flushing the inode from a context holding the cluster buffer locked.
+ *
+ * This is the normal runtime method of aborting writeback of an inode that is
+ * attached to a cluster buffer. It occurs when the inode and the backing
+ * cluster buffer have been freed (i.e. inode is XFS_ISTALE), or when cluster
+ * flushing or buffer IO completion encounters a log shutdown situation.
+ *
+ * If we need to abort inode writeback and we don't already hold the buffer
+ * locked, call xfs_iflush_shutdown_abort() instead as this should only ever be
+ * necessary in a shutdown situation.
  */
 void
 xfs_iflush_abort(
        struct xfs_inode        *ip)
 {
        struct xfs_inode_log_item *iip = ip->i_itemp;
-       struct xfs_buf          *bp = NULL;
+       struct xfs_buf          *bp;
 
-       if (iip) {
-               /*
-                * Clear the failed bit before removing the item from the AIL so
-                * xfs_trans_ail_delete() doesn't try to clear and release the
-                * buffer attached to the log item before we are done with it.
-                */
-               clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags);
-               xfs_trans_ail_delete(&iip->ili_item, 0);
+       if (!iip) {
+               /* clean inode, nothing to do */
+               xfs_iflags_clear(ip, XFS_IFLUSHING);
+               return;
+       }
+
+       /*
+        * Remove the inode item from the AIL before we clear its internal
+        * state. Whilst the inode is in the AIL, it should have a valid buffer
+        * pointer for push operations to access - it is only safe to remove the
+        * inode from the buffer once it has been removed from the AIL.
+        *
+        * We also clear the failed bit before removing the item from the AIL
+        * as xfs_trans_ail_delete()->xfs_clear_li_failed() will release buffer
+        * references the inode item owns and needs to hold until we've fully
+        * aborted the inode log item and detached it from the buffer.
+        */
+       clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags);
+       xfs_trans_ail_delete(&iip->ili_item, 0);
+
+       /*
+        * Grab the inode buffer so we can release the reference the inode log
+        * item holds on it.
+        */
+       spin_lock(&iip->ili_lock);
+       bp = iip->ili_item.li_buf;
+       xfs_iflush_abort_clean(iip);
+       spin_unlock(&iip->ili_lock);
 
+       xfs_iflags_clear(ip, XFS_IFLUSHING);
+       if (bp)
+               xfs_buf_rele(bp);
+}
+
+/*
+ * Abort an inode flush in the case of a shutdown filesystem. This can be called
+ * from anywhere with just an inode reference and does not require holding the
+ * inode cluster buffer locked. If the inode is attached to a cluster buffer,
+ * it will grab and lock it safely, then abort the inode flush.
+ */
+void
+xfs_iflush_shutdown_abort(
+       struct xfs_inode        *ip)
+{
+       struct xfs_inode_log_item *iip = ip->i_itemp;
+       struct xfs_buf          *bp;
+
+       if (!iip) {
+               /* clean inode, nothing to do */
+               xfs_iflags_clear(ip, XFS_IFLUSHING);
+               return;
+       }
+
+       spin_lock(&iip->ili_lock);
+       bp = iip->ili_item.li_buf;
+       if (!bp) {
+               spin_unlock(&iip->ili_lock);
+               xfs_iflush_abort(ip);
+               return;
+       }
+
+       /*
+        * We have to take a reference to the buffer so that it doesn't get
+        * freed when we drop the ili_lock and then wait to lock the buffer.
+        * We'll clean up the extra reference after we pick up the ili_lock
+        * again.
+        */
+       xfs_buf_hold(bp);
+       spin_unlock(&iip->ili_lock);
+       xfs_buf_lock(bp);
+
+       spin_lock(&iip->ili_lock);
+       if (!iip->ili_item.li_buf) {
                /*
-                * Clear the inode logging fields so no more flushes are
-                * attempted.
+                * Raced with another removal; we now hold the only reference
+                * to bp. The inode should not be in the AIL anymore, so just
+                * clean up and return.
                 */
-               spin_lock(&iip->ili_lock);
-               iip->ili_last_fields = 0;
-               iip->ili_fields = 0;
-               iip->ili_fsync_fields = 0;
-               iip->ili_flush_lsn = 0;
-               bp = iip->ili_item.li_buf;
-               iip->ili_item.li_buf = NULL;
-               list_del_init(&iip->ili_item.li_bio_list);
+               ASSERT(list_empty(&iip->ili_item.li_bio_list));
+               ASSERT(!test_bit(XFS_LI_IN_AIL, &iip->ili_item.li_flags));
+               xfs_iflush_abort_clean(iip);
                spin_unlock(&iip->ili_lock);
+               xfs_iflags_clear(ip, XFS_IFLUSHING);
+               xfs_buf_relse(bp);
+               return;
        }
-       xfs_iflags_clear(ip, XFS_IFLUSHING);
-       if (bp)
-               xfs_buf_rele(bp);
+
+       /*
+        * Got two references to bp. The first will get dropped by
+        * xfs_iflush_abort() when the item is removed from the buffer list, but
+        * we can't drop our reference until _abort() returns because we have to
+        * unlock the buffer as well. Hence we abort and then unlock and release
+        * our reference to the buffer.
+        */
+       ASSERT(iip->ili_item.li_buf == bp);
+       spin_unlock(&iip->ili_lock);
+       xfs_iflush_abort(ip);
+       xfs_buf_relse(bp);
 }
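
The hold/unlock/lock/recheck sequence above is the standard pattern for
taking a sleeping lock on an object discovered under a spinlock. Distilled to
its shape (names from this file; NULL and error handling omitted):

spin_lock(&iip->ili_lock);
bp = iip->ili_item.li_buf;      /* object found under the spinlock */
xfs_buf_hold(bp);               /* pin it across the lock drop */
spin_unlock(&iip->ili_lock);

xfs_buf_lock(bp);               /* may sleep; ili_lock cannot be held here */

spin_lock(&iip->ili_lock);
if (iip->ili_item.li_buf != bp) {
        /* lost a race while sleeping: back off and take the other path */
}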
 
+
 /*
  * convert an xfs_inode_log_format struct from the old 32 bit version
  * (which can have different field alignments) to the native 64 bit version
index 1a302000d604de54c3b37e6de0e479aa1cd7ef08..bbd836a44ff0480328704bf53907c0de4a8a6132 100644 (file)
@@ -44,6 +44,7 @@ static inline int xfs_inode_clean(struct xfs_inode *ip)
 extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *);
 extern void xfs_inode_item_destroy(struct xfs_inode *);
 extern void xfs_iflush_abort(struct xfs_inode *);
+extern void xfs_iflush_shutdown_abort(struct xfs_inode *);
 extern int xfs_inode_item_format_convert(xfs_log_iovec_t *,
                                         struct xfs_inode_log_format *);
 
index 09a8fba84ff99b2f6eaf66843e5420468fd3f50c..cb9105d667db440dfdbebbb48606ddebe8857af3 100644 (file)
@@ -197,8 +197,6 @@ static inline uint64_t howmany_64(uint64_t x, uint32_t y)
 
 int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count,
                char *data, unsigned int op);
-void xfs_flush_bdev_async(struct bio *bio, struct block_device *bdev,
-               struct completion *done);
 
 #define ASSERT_ALWAYS(expr)    \
        (likely(expr) ? (void)0 : assfail(NULL, #expr, __FILE__, __LINE__))
index a8034c0afdf2270d0fe7a580cbe2f5375031b1cb..499e15b24215703aa3847972b6c44a87a28e70b6 100644 (file)
@@ -487,7 +487,10 @@ out_error:
  * Run all the pending iclog callbacks and wake log force waiters and iclog
  * space waiters so they can process the newly set shutdown state. We really
  * don't care what order we process callbacks here because the log is shut down
- * and so state cannot change on disk anymore.
+ * and so state cannot change on disk anymore. However, we cannot wake waiters
+ * until the callbacks have been processed because we may be in unmount and
+ * we must ensure that all AIL operations the callbacks perform have completed
+ * before we tear down the AIL.
  *
  * We avoid processing actively referenced iclogs so that we don't run callbacks
  * while the iclog owner might still be preparing the iclog for IO submission.
@@ -501,7 +504,6 @@ xlog_state_shutdown_callbacks(
        struct xlog_in_core     *iclog;
        LIST_HEAD(cb_list);
 
-       spin_lock(&log->l_icloglock);
        iclog = log->l_iclog;
        do {
                if (atomic_read(&iclog->ic_refcnt)) {
@@ -509,26 +511,22 @@ xlog_state_shutdown_callbacks(
                        continue;
                }
                list_splice_init(&iclog->ic_callbacks, &cb_list);
+               spin_unlock(&log->l_icloglock);
+
+               xlog_cil_process_committed(&cb_list);
+
+               spin_lock(&log->l_icloglock);
                wake_up_all(&iclog->ic_write_wait);
                wake_up_all(&iclog->ic_force_wait);
        } while ((iclog = iclog->ic_next) != log->l_iclog);
 
        wake_up_all(&log->l_flush_wait);
-       spin_unlock(&log->l_icloglock);
-
-       xlog_cil_process_committed(&cb_list);
 }
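
After this change the function is entered with l_icloglock held and cycles
the lock inside the loop: callbacks (which may block on AIL operations) run
unlocked, while wakeups are deferred until processing is done, so an
unmounting thread woken here can safely tear down the AIL. The distilled
shape (refcount skip omitted):

/* caller holds log->l_icloglock */
do {
        list_splice_init(&iclog->ic_callbacks, &cb_list);
        spin_unlock(&log->l_icloglock);

        xlog_cil_process_committed(&cb_list);   /* may touch the AIL */

        spin_lock(&log->l_icloglock);
        wake_up_all(&iclog->ic_write_wait);     /* only after processing */
        wake_up_all(&iclog->ic_force_wait);
} while ((iclog = iclog->ic_next) != log->l_iclog);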
 
 /*
  * Flush iclog to disk if this is the last reference to the given iclog and
  * it is in the WANT_SYNC state.
  *
- * If the caller passes in a non-zero @old_tail_lsn and the current log tail
- * does not match, there may be metadata on disk that must be persisted before
- * this iclog is written.  To satisfy that requirement, set the
- * XLOG_ICL_NEED_FLUSH flag as a condition for writing this iclog with the new
- * log tail value.
- *
  * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
  * log tail is updated correctly. NEED_FUA indicates that the iclog will be
  * written to stable storage, and implies that a commit record is contained
@@ -545,12 +543,10 @@ xlog_state_shutdown_callbacks(
  * always capture the tail lsn on the iclog on the first NEED_FUA release
  * regardless of the number of active reference counts on this iclog.
  */
-
 int
 xlog_state_release_iclog(
        struct xlog             *log,
-       struct xlog_in_core     *iclog,
-       xfs_lsn_t               old_tail_lsn)
+       struct xlog_in_core     *iclog)
 {
        xfs_lsn_t               tail_lsn;
        bool                    last_ref;
@@ -561,18 +557,14 @@ xlog_state_release_iclog(
        /*
         * Grabbing the current log tail needs to be atomic w.r.t. the writing
         * of the tail LSN into the iclog so we guarantee that the log tail does
-        * not move between deciding if a cache flush is required and writing
-        * the LSN into the iclog below.
+        * not move between the first time we know that the iclog needs to be
+        * made stable and when we eventually submit it.
         */
-       if (old_tail_lsn || iclog->ic_state == XLOG_STATE_WANT_SYNC) {
+       if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
+            (iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
+           !iclog->ic_header.h_tail_lsn) {
                tail_lsn = xlog_assign_tail_lsn(log->l_mp);
-
-               if (old_tail_lsn && tail_lsn != old_tail_lsn)
-                       iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
-
-               if ((iclog->ic_flags & XLOG_ICL_NEED_FUA) &&
-                   !iclog->ic_header.h_tail_lsn)
-                       iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+               iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
        }
 
        last_ref = atomic_dec_and_test(&iclog->ic_refcnt);
@@ -583,11 +575,8 @@ xlog_state_release_iclog(
                 * pending iclog callbacks that were waiting on the release of
                 * this iclog.
                 */
-               if (last_ref) {
-                       spin_unlock(&log->l_icloglock);
+               if (last_ref)
                        xlog_state_shutdown_callbacks(log);
-                       spin_lock(&log->l_icloglock);
-               }
                return -EIO;
        }
 
@@ -600,8 +589,6 @@ xlog_state_release_iclog(
        }
 
        iclog->ic_state = XLOG_STATE_SYNCING;
-       if (!iclog->ic_header.h_tail_lsn)
-               iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
        xlog_verify_tail_lsn(log, iclog);
        trace_xlog_iclog_syncing(iclog, _RET_IP_);
 
@@ -873,7 +860,7 @@ xlog_force_iclog(
        iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
        if (iclog->ic_state == XLOG_STATE_ACTIVE)
                xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
-       return xlog_state_release_iclog(iclog->ic_log, iclog, 0);
+       return xlog_state_release_iclog(iclog->ic_log, iclog);
 }
 
 /*
@@ -1373,7 +1360,7 @@ xlog_ioend_work(
         */
        if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
                xfs_alert(log->l_mp, "log I/O error %d", error);
-               xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+               xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
        }
 
        xlog_state_done_syncing(iclog);
@@ -1912,7 +1899,7 @@ xlog_write_iclog(
        iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
 
        if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
-               xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+               xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
                return;
        }
        if (is_vmalloc_addr(iclog->ic_data))
@@ -2411,7 +2398,7 @@ xlog_write_copy_finish(
                ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
                        xlog_is_shutdown(log));
 release_iclog:
-       error = xlog_state_release_iclog(log, iclog, 0);
+       error = xlog_state_release_iclog(log, iclog);
        spin_unlock(&log->l_icloglock);
        return error;
 }
@@ -2487,7 +2474,7 @@ xlog_write(
                xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
                     "ctx ticket reservation ran out. Need to up reservation");
                xlog_print_tic_res(log->l_mp, ticket);
-               xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+               xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
        }
 
        len = xlog_write_calc_vec_length(ticket, log_vector, optype);
@@ -2628,7 +2615,7 @@ next_lv:
 
        spin_lock(&log->l_icloglock);
        xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
-       error = xlog_state_release_iclog(log, iclog, 0);
+       error = xlog_state_release_iclog(log, iclog);
        spin_unlock(&log->l_icloglock);
 
        return error;
@@ -3052,7 +3039,7 @@ restart:
                 * reference to the iclog.
                 */
                if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
-                       error = xlog_state_release_iclog(log, iclog, 0);
+                       error = xlog_state_release_iclog(log, iclog);
                spin_unlock(&log->l_icloglock);
                if (error)
                        return error;
@@ -3821,9 +3808,10 @@ xlog_verify_iclog(
 #endif
 
 /*
- * Perform a forced shutdown on the log. This should be called once and once
- * only by the high level filesystem shutdown code to shut the log subsystem
- * down cleanly.
+ * Perform a forced shutdown on the log.
+ *
+ * This can be called from low level log code to trigger a shutdown, or from the
+ * high level mount shutdown code when the mount shuts down.
  *
  * Our main objectives here are to make sure that:
  *     a. if the shutdown was not due to a log IO error, flush the logs to
@@ -3832,6 +3820,8 @@ xlog_verify_iclog(
  *        parties to find out. Nothing new gets queued after this is done.
  *     c. Tasks sleeping on log reservations, pinned objects and
  *        other resources get woken up.
+ *     d. The mount is also marked as shut down so that log-triggered shutdowns
+ *        still behave the same as if they called xfs_force_shutdown().
  *
  * Return true if the shutdown cause was a log IO error and we actually shut the
  * log down.
@@ -3843,25 +3833,25 @@ xlog_force_shutdown(
 {
        bool            log_error = (shutdown_flags & SHUTDOWN_LOG_IO_ERROR);
 
-       /*
-        * If this happens during log recovery then we aren't using the runtime
-        * log mechanisms yet so there's nothing to shut down.
-        */
-       if (!log || xlog_in_recovery(log))
+       if (!log)
                return false;
 
-       ASSERT(!xlog_is_shutdown(log));
-
        /*
         * Flush all the completed transactions to disk before marking the log
         * as shut down. We need to do this first as shutting down the log
         * before the force will prevent the log force from flushing the iclogs
         * to disk.
         *
-        * Re-entry due to a log IO error shutdown during the log force is
-        * prevented by the atomicity of higher level shutdown code.
+        * When we are in recovery, there are no transactions to flush, and
+        * we don't want to touch the log because we don't want to perturb the
+        * current head/tail for future recovery attempts. Hence we need to
+        * avoid a log force in this case.
+        *
+        * If we are shutting down due to a log IO error, then we must avoid
+        * trying to write the log as that may just result in more IO errors and
+        * an endless shutdown/force loop.
         */
-       if (!log_error)
+       if (!log_error && !xlog_in_recovery(log))
                xfs_log_force(log->l_mp, XFS_LOG_SYNC);
 
        /*
@@ -3878,11 +3868,24 @@ xlog_force_shutdown(
        spin_lock(&log->l_icloglock);
        if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) {
                spin_unlock(&log->l_icloglock);
-               ASSERT(0);
                return false;
        }
        spin_unlock(&log->l_icloglock);
 
+       /*
+        * If this log shutdown also sets the mount shutdown state, issue a
+        * shutdown warning message.
+        */
+       if (!test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &log->l_mp->m_opstate)) {
+               xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR,
+"Filesystem has been shut down due to log error (0x%x).",
+                               shutdown_flags);
+               xfs_alert(log->l_mp,
+"Please unmount the filesystem and rectify the problem(s).");
+               if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
+                       xfs_stack_trace();
+       }
+
        /*
         * We don't want anybody waiting for log reservations after this. That
         * means we have to wake up everybody queued up on reserveq as well as
@@ -3903,8 +3906,12 @@ xlog_force_shutdown(
        wake_up_all(&log->l_cilp->xc_start_wait);
        wake_up_all(&log->l_cilp->xc_commit_wait);
        spin_unlock(&log->l_cilp->xc_push_lock);
+
+       spin_lock(&log->l_icloglock);
        xlog_state_shutdown_callbacks(log);
+       spin_unlock(&log->l_icloglock);
 
+       wake_up_var(&log->l_opstate);
        return log_error;
 }
 
index 796e4464f809b143ede4dab5698a1bd0cb7db64b..ba57323bfdcea38d0c02cc308c43260cd89d5e29 100644 (file)
@@ -540,7 +540,7 @@ xlog_cil_insert_items(
        spin_unlock(&cil->xc_cil_lock);
 
        if (tp->t_ticket->t_curr_res < 0)
-               xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+               xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
 }
 
 static void
@@ -705,11 +705,21 @@ xlog_cil_set_ctx_write_state(
                 * The LSN we need to pass to the log items on transaction
                 * commit is the LSN reported by the first log vector write, not
                 * the commit lsn. If we use the commit record lsn then we can
-                * move the tail beyond the grant write head.
+                * move the grant write head beyond the tail LSN and overwrite
+                * it.
                 */
                ctx->start_lsn = lsn;
                wake_up_all(&cil->xc_start_wait);
                spin_unlock(&cil->xc_push_lock);
+
+               /*
+                * Make sure the metadata we are about to overwrite in the log
+                * has been flushed to stable storage before this iclog is
+                * issued.
+                */
+               spin_lock(&cil->xc_log->l_icloglock);
+               iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
+               spin_unlock(&cil->xc_log->l_icloglock);
                return;
        }
 
@@ -854,7 +864,7 @@ xlog_cil_write_commit_record(
 
        error = xlog_write(log, ctx, &vec, ctx->ticket, XLOG_COMMIT_TRANS);
        if (error)
-               xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+               xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
        return error;
 }
 
@@ -888,10 +898,7 @@ xlog_cil_push_work(
        struct xfs_trans_header thdr;
        struct xfs_log_iovec    lhdr;
        struct xfs_log_vec      lvhdr = { NULL };
-       xfs_lsn_t               preflush_tail_lsn;
        xfs_csn_t               push_seq;
-       struct bio              bio;
-       DECLARE_COMPLETION_ONSTACK(bdev_flush);
        bool                    push_commit_stable;
 
        new_ctx = xlog_cil_ctx_alloc();
@@ -961,23 +968,6 @@ xlog_cil_push_work(
        list_add(&ctx->committing, &cil->xc_committing);
        spin_unlock(&cil->xc_push_lock);
 
-       /*
-        * The CIL is stable at this point - nothing new will be added to it
-        * because we hold the flush lock exclusively. Hence we can now issue
-        * a cache flush to ensure all the completed metadata in the journal we
-        * are about to overwrite is on stable storage.
-        *
-        * Because we are issuing this cache flush before we've written the
-        * tail lsn to the iclog, we can have metadata IO completions move the
-        * tail forwards between the completion of this flush and the iclog
-        * being written. In this case, we need to re-issue the cache flush
-        * before the iclog write. To detect whether the log tail moves, sample
-        * the tail LSN *before* we issue the flush.
-        */
-       preflush_tail_lsn = atomic64_read(&log->l_tail_lsn);
-       xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev,
-                               &bdev_flush);
-
        /*
         * Pull all the log vectors off the items in the CIL, and remove the
         * items from the CIL. We don't need the CIL lock here because it's only
@@ -1054,12 +1044,6 @@ xlog_cil_push_work(
        lvhdr.lv_iovecp = &lhdr;
        lvhdr.lv_next = ctx->lv_chain;
 
-       /*
-        * Before we format and submit the first iclog, we have to ensure that
-        * the metadata writeback ordering cache flush is complete.
-        */
-       wait_for_completion(&bdev_flush);
-
        error = xlog_cil_write_chain(ctx, &lvhdr);
        if (error)
                goto out_abort_free_ticket;
@@ -1118,7 +1102,7 @@ xlog_cil_push_work(
        if (push_commit_stable &&
            ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
                xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
-       xlog_state_release_iclog(log, ctx->commit_iclog, preflush_tail_lsn);
+       xlog_state_release_iclog(log, ctx->commit_iclog);
 
        /* Not safe to reference ctx now! */
 
@@ -1139,7 +1123,7 @@ out_abort_free_ticket:
                return;
        }
        spin_lock(&log->l_icloglock);
-       xlog_state_release_iclog(log, ctx->commit_iclog, 0);
+       xlog_state_release_iclog(log, ctx->commit_iclog);
        /* Not safe to reference ctx now! */
        spin_unlock(&log->l_icloglock);
 }
index 23103d68423c58faa60bfb38177d87f40311775e..401cdc400980b07bba88fc1a6be6576c70e6a9d7 100644 (file)
@@ -484,6 +484,17 @@ xlog_is_shutdown(struct xlog *log)
        return test_bit(XLOG_IO_ERROR, &log->l_opstate);
 }
 
+/*
+ * Wait until xlog_force_shutdown() has marked the log as shut down
+ * so xlog_is_shutdown() will always return true.
+ */
+static inline void
+xlog_shutdown_wait(
+       struct xlog     *log)
+{
+       wait_var_event(&log->l_opstate, xlog_is_shutdown(log));
+}
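
This helper pairs with the wake_up_var(&log->l_opstate) call added to
xlog_force_shutdown() above; together they form the kernel's generic
wait_var_event()/wake_up_var() pattern. A minimal hedged sketch of that
pattern (the example_* names are illustrative, not from the patch):

#include <linux/bitops.h>
#include <linux/wait_bit.h>

static unsigned long example_state;

/* Waiter: sleeps until bit 0 of example_state is observed set. */
static void example_wait(void)
{
	wait_var_event(&example_state, test_bit(0, &example_state));
}

/*
 * Waker: update the state, then wake anyone sleeping on its address.
 * The wake must pass the same address the waiter gave wait_var_event(),
 * which is why xlog_force_shutdown() wakes &log->l_opstate after
 * setting XLOG_IO_ERROR in it.
 */
static void example_wake(void)
{
	set_bit(0, &example_state);
	wake_up_var(&example_state);
}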
+
 /* common routines */
 extern int
 xlog_recover(
@@ -524,8 +535,7 @@ void        xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);
 
 void xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog,
                int eventual_size);
-int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
-               xfs_lsn_t log_tail_lsn);
+int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog);
 
 /*
  * When we crack an atomic LSN, we sample it first so that the value will not
index 96c997ed2ec8faa54d4429dd22e338d9e7ebb605..c4ad4296c5401ba06e11fc7c09846f04f0d2f946 100644 (file)
@@ -2485,7 +2485,7 @@ xlog_finish_defer_ops(
                error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
                                dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
                if (error) {
-                       xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
+                       xlog_force_shutdown(mp->m_log, SHUTDOWN_LOG_IO_ERROR);
                        return error;
                }
 
@@ -2519,21 +2519,22 @@ xlog_abort_defer_ops(
                xfs_defer_ops_capture_free(mp, dfc);
        }
 }
+
 /*
  * When this is called, all of the log intent items which did not have
- * corresponding log done items should be in the AIL.  What we do now
- * is update the data structures associated with each one.
+ * corresponding log done items should be in the AIL.  What we do now is update
+ * the data structures associated with each one.
  *
- * Since we process the log intent items in normal transactions, they
- * will be removed at some point after the commit.  This prevents us
- * from just walking down the list processing each one.  We'll use a
- * flag in the intent item to skip those that we've already processed
- * and use the AIL iteration mechanism's generation count to try to
- * speed this up at least a bit.
+ * Since we process the log intent items in normal transactions, they will be
+ * removed at some point after the commit.  This prevents us from just walking
+ * down the list processing each one.  We'll use a flag in the intent item to
+ * skip those that we've already processed and use the AIL iteration mechanism's
+ * generation count to try to speed this up at least a bit.
  *
- * When we start, we know that the intents are the only things in the
- * AIL.  As we process them, however, other items are added to the
- * AIL.
+ * When we start, we know that the intents are the only things in the AIL. As we
+ * process them, however, other items are added to the AIL. Hence we know we
+ * have started recovery on all the pending intents when we find a non-intent
+ * item in the AIL.
  */
 STATIC int
 xlog_recover_process_intents(
@@ -2556,17 +2557,8 @@ xlog_recover_process_intents(
        for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
             lip != NULL;
             lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
-               /*
-                * We're done when we see something other than an intent.
-                * There should be no intents left in the AIL now.
-                */
-               if (!xlog_item_is_intent(lip)) {
-#ifdef DEBUG
-                       for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
-                               ASSERT(!xlog_item_is_intent(lip));
-#endif
+               if (!xlog_item_is_intent(lip))
                        break;
-               }
 
                /*
                 * We should never see a redo item with a LSN higher than
@@ -2607,8 +2599,9 @@ err:
 }
 
 /*
- * A cancel occurs when the mount has failed and we're bailing out.
- * Release all pending log intent items so they don't pin the AIL.
+ * A cancel occurs when the mount has failed and we're bailing out.  Release all
+ * pending log intent items that we haven't started recovery on so they don't
+ * pin the AIL.
  */
 STATIC void
 xlog_recover_cancel_intents(
@@ -2622,17 +2615,8 @@ xlog_recover_cancel_intents(
        spin_lock(&ailp->ail_lock);
        lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
        while (lip != NULL) {
-               /*
-                * We're done when we see something other than an intent.
-                * There should be no intents left in the AIL now.
-                */
-               if (!xlog_item_is_intent(lip)) {
-#ifdef DEBUG
-                       for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
-                               ASSERT(!xlog_item_is_intent(lip));
-#endif
+               if (!xlog_item_is_intent(lip))
                        break;
-               }
 
                spin_unlock(&ailp->ail_lock);
                lip->li_ops->iop_release(lip);
@@ -3470,7 +3454,7 @@ xlog_recover_finish(
                 */
                xlog_recover_cancel_intents(log);
                xfs_alert(log->l_mp, "Failed to recover intents");
-               xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+               xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
                return error;
        }
 
@@ -3517,7 +3501,7 @@ xlog_recover_finish(
                 * end of intents processing can be pushed through the CIL
                 * and AIL.
                 */
-               xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+               xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
        }
 
        return 0;
index bed73e8002a51bc0bdebe2922faa43857438971f..c5f153c3693fc63c21d36a6afa84c8ccc69d0364 100644 (file)
@@ -21,6 +21,7 @@
 #include "xfs_trans.h"
 #include "xfs_trans_priv.h"
 #include "xfs_log.h"
+#include "xfs_log_priv.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_fsops.h"
@@ -1146,7 +1147,7 @@ xfs_mod_fdblocks(
         * problems (i.e. transaction abort, pagecache discards, etc.) than
         * slightly premature -ENOSPC.
         */
-       set_aside = mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks);
+       set_aside = xfs_fdblocks_unavailable(mp);
        percpu_counter_add_batch(&mp->m_fdblocks, delta, batch);
        if (__percpu_counter_compare(&mp->m_fdblocks, set_aside,
                                     XFS_FDBLOCKS_BATCH) >= 0) {
index 00720a02e7615198454b8296047aa98d0299859c..f6dc19de8322a6f104672bb61d53b8983e9b8566 100644 (file)
@@ -479,6 +479,21 @@ extern void        xfs_unmountfs(xfs_mount_t *);
  */
 #define XFS_FDBLOCKS_BATCH     1024
 
+/*
+ * Estimate the amount of free space that is not available to userspace and is
+ * not explicitly reserved from the incore fdblocks.  This includes:
+ *
+ * - The minimum number of blocks needed to support splitting a bmap btree
+ * - The blocks currently in use by the freespace btrees because they record
+ *   the actual blocks that will fill per-AG metadata space reservations
+ */
+static inline uint64_t
+xfs_fdblocks_unavailable(
+       struct xfs_mount        *mp)
+{
+       return mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks);
+}
+
 extern int     xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
                                 bool reserved);
 extern int     xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);
index d84714e4e46a2a536f3920bc29d63776f0e79f40..54be9d64093edacfaea509ff2d1e73cf61a58493 100644 (file)
@@ -815,7 +815,8 @@ xfs_fs_statfs(
        spin_unlock(&mp->m_sb_lock);
 
        /* make sure statp->f_bfree does not underflow */
-       statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
+       statp->f_bfree = max_t(int64_t, 0,
+                               fdblocks - xfs_fdblocks_unavailable(mp));
        statp->f_bavail = statp->f_bfree;
 
        fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
index 917a69f0a6fffd2c4e3acd751adf0a5da35bf7d1..0ac717aad380075848af3fc8157e0e5b3c9ed631 100644 (file)
@@ -836,6 +836,7 @@ __xfs_trans_commit(
        bool                    regrant)
 {
        struct xfs_mount        *mp = tp->t_mountp;
+       struct xlog             *log = mp->m_log;
        xfs_csn_t               commit_seq = 0;
        int                     error = 0;
        int                     sync = tp->t_flags & XFS_TRANS_SYNC;
@@ -864,7 +865,13 @@ __xfs_trans_commit(
        if (!(tp->t_flags & XFS_TRANS_DIRTY))
                goto out_unreserve;
 
-       if (xfs_is_shutdown(mp)) {
+       /*
+        * We must check against log shutdown here because we cannot abort log
+        * items and leave them dirty, inconsistent and unpinned in memory while
+        * the log is active. This leaves them open to being written back to
+        * disk, and that will lead to on-disk corruption.
+        */
+       if (xlog_is_shutdown(log)) {
                error = -EIO;
                goto out_unreserve;
        }
@@ -878,7 +885,7 @@ __xfs_trans_commit(
                xfs_trans_apply_sb_deltas(tp);
        xfs_trans_apply_dquot_deltas(tp);
 
-       xlog_cil_commit(mp->m_log, tp, &commit_seq, regrant);
+       xlog_cil_commit(log, tp, &commit_seq, regrant);
 
        xfs_trans_free(tp);
 
@@ -905,10 +912,10 @@ out_unreserve:
         */
        xfs_trans_unreserve_and_mod_dquots(tp);
        if (tp->t_ticket) {
-               if (regrant && !xlog_is_shutdown(mp->m_log))
-                       xfs_log_ticket_regrant(mp->m_log, tp->t_ticket);
+               if (regrant && !xlog_is_shutdown(log))
+                       xfs_log_ticket_regrant(log, tp->t_ticket);
                else
-                       xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
+                       xfs_log_ticket_ungrant(log, tp->t_ticket);
                tp->t_ticket = NULL;
        }
        xfs_trans_free_items(tp, !!error);
@@ -926,18 +933,27 @@ xfs_trans_commit(
 }
 
 /*
- * Unlock all of the transaction's items and free the transaction.
- * The transaction must not have modified any of its items, because
- * there is no way to restore them to their previous state.
+ * Unlock all of the transaction's items and free the transaction.  If the
+ * transaction is dirty, we must shut down the filesystem because there is no
+ * way to restore the items to their previous state.
  *
- * If the transaction has made a log reservation, make sure to release
- * it as well.
+ * If the transaction has made a log reservation, make sure to release it as
+ * well.
+ *
+ * This is a high level function (equivalent to xfs_trans_commit()) and so can
+ * be called after the transaction has effectively been aborted due to the mount
+ * being shut down. However, if the mount has not been shut down and the
+ * transaction is dirty we will shut the mount down and, in doing so, that
+ * guarantees that the log is shut down, too. Hence we don't need to be as
+ * careful with shutdown state and dirty items here as we need to be in
+ * xfs_trans_commit().
  */
 void
 xfs_trans_cancel(
        struct xfs_trans        *tp)
 {
        struct xfs_mount        *mp = tp->t_mountp;
+       struct xlog             *log = mp->m_log;
        bool                    dirty = (tp->t_flags & XFS_TRANS_DIRTY);
 
        trace_xfs_trans_cancel(tp, _RET_IP_);
@@ -955,16 +971,18 @@ xfs_trans_cancel(
        }
 
        /*
-        * See if the caller is relying on us to shut down the
-        * filesystem.  This happens in paths where we detect
-        * corruption and decide to give up.
+        * See if the caller is relying on us to shut down the filesystem. We
+        * only want an error report if there isn't already a shutdown in
+        * progress, so we only need to check against the mount shutdown state
+        * here.
         */
        if (dirty && !xfs_is_shutdown(mp)) {
                XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
        }
 #ifdef DEBUG
-       if (!dirty && !xfs_is_shutdown(mp)) {
+       /* Log items need to be consistent until the log is shut down. */
+       if (!dirty && !xlog_is_shutdown(log)) {
                struct xfs_log_item *lip;
 
                list_for_each_entry(lip, &tp->t_items, li_trans)
@@ -975,7 +993,7 @@ xfs_trans_cancel(
        xfs_trans_unreserve_and_mod_dquots(tp);
 
        if (tp->t_ticket) {
-               xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
+               xfs_log_ticket_ungrant(log, tp->t_ticket);
                tp->t_ticket = NULL;
        }
 
index c2ccb98c7bcdc7bd7e59f08a66a57ebd73dadbcf..d3a97a028560651828a5e73b75f8d3585a5a96eb 100644 (file)
@@ -873,17 +873,17 @@ xfs_trans_ail_delete(
        int                     shutdown_type)
 {
        struct xfs_ail          *ailp = lip->li_ailp;
-       struct xfs_mount        *mp = ailp->ail_log->l_mp;
+       struct xlog             *log = ailp->ail_log;
        xfs_lsn_t               tail_lsn;
 
        spin_lock(&ailp->ail_lock);
        if (!test_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
                spin_unlock(&ailp->ail_lock);
-               if (shutdown_type && !xlog_is_shutdown(ailp->ail_log)) {
-                       xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
+               if (shutdown_type && !xlog_is_shutdown(log)) {
+                       xfs_alert_tag(log->l_mp, XFS_PTAG_AILDELETE,
        "%s: attempting to delete a log item that is not in the AIL",
                                        __func__);
-                       xfs_force_shutdown(mp, shutdown_type);
+                       xlog_force_shutdown(log, shutdown_type);
                }
                return;
        }
index 3f7f01f03869059f1390b91a0d63831060cafb50..c4b78c21d793056f8087711b83bfcdedc801f90c 100644 (file)
@@ -509,7 +509,6 @@ extern int unregister_acpi_notifier(struct notifier_block *);
  * External Functions
  */
 
-int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
 struct acpi_device *acpi_fetch_acpi_dev(acpi_handle handle);
 acpi_status acpi_bus_get_status_handle(acpi_handle handle,
                                       unsigned long long *sta);
index c08758b6b364206d656a15979cc166e0e4fb3e58..c05d2ce9b6cd85bb9c2c439cf35160b4cdbcc838 100644 (file)
@@ -269,6 +269,7 @@ bool hv_isolation_type_snp(void);
 u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
 void hyperv_cleanup(void);
 bool hv_query_ext_cap(u64 cap_query);
+void hv_setup_dma_ops(struct device *dev, bool coherent);
 void *hv_map_memory(void *addr, unsigned long size);
 void hv_unmap_memory(void *addr);
 #else /* CONFIG_HYPERV */
index fd7feb5c789485864f0e6a596eaa209f04819996..eee6f7763a39f87e63faf4b2ed7ce32877d45733 100644 (file)
@@ -565,10 +565,14 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
 #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)       \
        do {                                                    \
                unsigned long _sz = huge_page_size(h);          \
-               if (_sz == PMD_SIZE)                            \
-                       tlb_flush_pmd_range(tlb, address, _sz); \
-               else if (_sz == PUD_SIZE)                       \
+               if (_sz >= P4D_SIZE)                            \
+                       tlb_flush_p4d_range(tlb, address, _sz); \
+               else if (_sz >= PUD_SIZE)                       \
                        tlb_flush_pud_range(tlb, address, _sz); \
+               else if (_sz >= PMD_SIZE)                       \
+                       tlb_flush_pmd_range(tlb, address, _sz); \
+               else                                            \
+                       tlb_flush_pte_range(tlb, address, _sz); \
                __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)
 
index 8fc637379899fd38b37bdbecaca6358a2f917dc8..df30f11b4a46011fdbcefc3d0a9287ebc62e8daa 100644 (file)
@@ -143,7 +143,7 @@ static inline void put_unaligned_be48(const u64 val, void *p)
 
 static inline u64 __get_unaligned_be48(const u8 *p)
 {
-       return (u64)p[0] << 40 | (u64)p[1] << 32 | p[2] << 24 |
+       return (u64)p[0] << 40 | (u64)p[1] << 32 | (u64)p[2] << 24 |
                p[3] << 16 | p[4] << 8 | p[5];
 }
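
The new (u64) cast on p[2] matters because of C integer promotion: the u8 is
widened to a signed int before the shift, so a byte with its top bit set is
shifted into the sign bit and then sign-extended when the result is widened to
64 bits, corrupting the upper half of the value. A hedged userspace
demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t b = 0x80;
	/* b is promoted to int; 0x80 << 24 lands in the sign bit, and the
	 * negative int result is sign-extended when widened to 64 bits. */
	uint64_t bad = b << 24;
	/* Widen first, then shift: the upper 32 bits stay zero. */
	uint64_t good = (uint64_t)b << 24;

	printf("bad  = %#llx\n", (unsigned long long)bad);  /* 0xffffffff80000000 */
	printf("good = %#llx\n", (unsigned long long)good); /* 0x80000000 */
	return 0;
}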
 
diff --git a/include/dt-bindings/clock/sun6i-rtc.h b/include/dt-bindings/clock/sun6i-rtc.h
new file mode 100644 (file)
index 0000000..c845493
--- /dev/null
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+
+#ifndef _DT_BINDINGS_CLK_SUN6I_RTC_H_
+#define _DT_BINDINGS_CLK_SUN6I_RTC_H_
+
+#define CLK_OSC32K             0
+#define CLK_OSC32K_FANOUT      1
+#define CLK_IOSC               2
+
+#endif /* _DT_BINDINGS_CLK_SUN6I_RTC_H_ */
index 338aa27e4773b5fdf5793b039ffd9c1b975a247f..edb7f6d41faa043d0a59b80eb0ba30307dea8fdf 100644 (file)
@@ -80,12 +80,6 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
 
 #ifdef CONFIG_BALLOON_COMPACTION
 extern const struct address_space_operations balloon_aops;
-extern bool balloon_page_isolate(struct page *page,
-                               isolate_mode_t mode);
-extern void balloon_page_putback(struct page *page);
-extern int balloon_page_migrate(struct address_space *mapping,
-                               struct page *newpage,
-                               struct page *page, enum migrate_mode mode);
 
 /*
  * balloon_page_insert - insert a page into the balloon's page list and make
@@ -155,22 +149,6 @@ static inline void balloon_page_delete(struct page *page)
        list_del(&page->lru);
 }
 
-static inline bool balloon_page_isolate(struct page *page)
-{
-       return false;
-}
-
-static inline void balloon_page_putback(struct page *page)
-{
-       return;
-}
-
-static inline int balloon_page_migrate(struct page *newpage,
-                               struct page *page, enum migrate_mode mode)
-{
-       return 0;
-}
-
 static inline gfp_t balloon_mapping_gfp_mask(void)
 {
        return GFP_HIGHUSER;
index f2ad8ed8f777cf6df31796fe52fa1eeeb861027b..652cd05b0924c3d687716a0f4bb42e5c7f8d331e 100644 (file)
@@ -95,7 +95,10 @@ struct blkcg_gq {
 
        spinlock_t                      async_bio_lock;
        struct bio_list                 async_bios;
-       struct work_struct              async_bio_work;
+       union {
+               struct work_struct      async_bio_work;
+               struct work_struct      free_work;
+       };
 
        atomic_t                        use_delay;
        atomic64_t                      delay_nsec;
index dd0763a1c6740f74b6a5fe4fb6d35d70ed610d6c..1973ef9bd40fcfbdff0bc7817272e8963f59c80a 100644 (file)
@@ -85,8 +85,10 @@ struct block_device {
  */
 #if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
 typedef u32 __bitwise blk_status_t;
+typedef u32 blk_short_t;
 #else
 typedef u8 __bitwise blk_status_t;
+typedef u16 blk_short_t;
 #endif
 #define        BLK_STS_OK 0
 #define BLK_STS_NOTSUPP                ((__force blk_status_t)1)
index cf32123b39f5af4113af5393a9666f5f0c42c17b..57c8ec44ab4e69ca5489a04b3da6bdc5ec05ab21 100644 (file)
@@ -9,4 +9,6 @@
 int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode);
 int sunxi_ccu_get_mmc_timing_mode(struct clk *clk);
 
+int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg);
+
 #endif
index 90fd742fd1ef5436f100bea5d3bcbdafafcacf33..a6f637342740b921412eb183b4416fa72f128a9f 100644 (file)
  */
 #ifdef CONFIG_CMA_AREAS
 #define MAX_CMA_AREAS  (1 + CONFIG_CMA_AREAS)
-
-#else
-#define MAX_CMA_AREAS  (0)
-
 #endif
 
 #define CMA_MAX_NAME 64
index fec374f69e125506a507e92ec9486e53c79c0de2..ec7f25def39290cfb331f0350f431cd8afd249b6 100644 (file)
@@ -61,6 +61,21 @@ to_dma_fence_array(struct dma_fence *fence)
        return container_of(fence, struct dma_fence_array, base);
 }
 
+/**
+ * dma_fence_array_for_each - iterate over all fences in array
+ * @fence: current fence
+ * @index: index into the array
+ * @head: potential dma_fence_array object
+ *
+ * Test if @head is a dma_fence_array object and if so, iterate over all
+ * fences in the array. If not, just iterate over the single fence @head
+ * itself.
+ *
+ * For a deep dive iterator see dma_fence_unwrap_for_each().
+ */
+#define dma_fence_array_for_each(fence, index, head)                   \
+       for (index = 0, fence = dma_fence_array_first(head); fence;     \
+            ++(index), fence = dma_fence_array_next(head, index))
+
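
A hedged usage sketch of the new iterator (example_all_signaled() is
illustrative, not part of the patch). If @head is not a dma_fence_array, the
loop visits just that single fence:

#include <linux/dma-fence-array.h>

/* Return true once every fence reachable from @head has signaled. */
static bool example_all_signaled(struct dma_fence *head)
{
	struct dma_fence *fence;
	unsigned int index;

	dma_fence_array_for_each(fence, index, head) {
		if (!dma_fence_is_signaled(fence))
			return false;
	}
	return true;
}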
 struct dma_fence_array *dma_fence_array_create(int num_fences,
                                               struct dma_fence **fences,
                                               u64 context, unsigned seqno,
@@ -68,4 +83,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
 
 bool dma_fence_match_context(struct dma_fence *fence, u64 context);
 
+struct dma_fence *dma_fence_array_first(struct dma_fence *head);
+struct dma_fence *dma_fence_array_next(struct dma_fence *head,
+                                      unsigned int index);
+
 #endif /* __LINUX_DMA_FENCE_ARRAY_H */
index 10d51bcdf7b7946a675203d0e37110454e9256c4..4bdf0b96da28312c8e33755ad6ffa1befdd022ae 100644 (file)
@@ -112,6 +112,8 @@ static inline void dma_fence_chain_free(struct dma_fence_chain *chain)
  *
  * Iterate over all fences in the chain. We keep a reference to the current
  * fence while inside the loop which must be dropped when breaking out.
+ *
+ * For a deep dive iterator see dma_fence_unwrap_for_each().
  */
 #define dma_fence_chain_for_each(iter, head)   \
        for (iter = dma_fence_get(head); iter; \
diff --git a/include/linux/dma-fence-unwrap.h b/include/linux/dma-fence-unwrap.h
new file mode 100644 (file)
index 0000000..77e335a
--- /dev/null
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * fence unwrapping: deep iteration over dma_fence_chain/dma_fence_array containers
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ * Authors:
+ *     Christian König <christian.koenig@amd.com>
+ */
+
+#ifndef __LINUX_DMA_FENCE_UNWRAP_H
+#define __LINUX_DMA_FENCE_UNWRAP_H
+
+#include <linux/dma-fence-chain.h>
+#include <linux/dma-fence-array.h>
+
+/**
+ * struct dma_fence_unwrap - cursor into the container structure
+ *
+ * Should be used with dma_fence_unwrap_for_each() iterator macro.
+ */
+struct dma_fence_unwrap {
+       /**
+        * @chain: potential dma_fence_chain, but can be other fence as well
+        */
+       struct dma_fence *chain;
+       /**
+        * @array: potential dma_fence_array, but can be other fence as well
+        */
+       struct dma_fence *array;
+       /**
+        * @index: last returned index if @array is really a dma_fence_array
+        */
+       unsigned int index;
+};
+
+/* Internal helper to start a new array iteration, don't use directly */
+static inline struct dma_fence *
+__dma_fence_unwrap_array(struct dma_fence_unwrap *cursor)
+{
+       cursor->array = dma_fence_chain_contained(cursor->chain);
+       cursor->index = 0;
+       return dma_fence_array_first(cursor->array);
+}
+
+/**
+ * dma_fence_unwrap_first - return the first fence from fence containers
+ * @head: the entrypoint into the containers
+ * @cursor: current position inside the containers
+ *
+ * Unwraps potential dma_fence_chain/dma_fence_array containers and returns the
+ * first fence.
+ */
+static inline struct dma_fence *
+dma_fence_unwrap_first(struct dma_fence *head, struct dma_fence_unwrap *cursor)
+{
+       cursor->chain = dma_fence_get(head);
+       return __dma_fence_unwrap_array(cursor);
+}
+
+/**
+ * dma_fence_unwrap_next - return the next fence from the fence containers
+ * @cursor: current position inside the containers
+ *
+ * Continue unwrapping the dma_fence_chain/dma_fence_array containers and return
+ * the next fence from them.
+ */
+static inline struct dma_fence *
+dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
+{
+       struct dma_fence *tmp;
+
+       ++cursor->index;
+       tmp = dma_fence_array_next(cursor->array, cursor->index);
+       if (tmp)
+               return tmp;
+
+       cursor->chain = dma_fence_chain_walk(cursor->chain);
+       return __dma_fence_unwrap_array(cursor);
+}
+
+/**
+ * dma_fence_unwrap_for_each - iterate over all fences in containers
+ * @fence: current fence
+ * @cursor: current position inside the containers
+ * @head: starting point for the iterator
+ *
+ * Unwrap dma_fence_chain and dma_fence_array containers and deep dive into all
+ * potential fences in them. If @head is just a normal fence, only that one is
+ * returned.
+ */
+#define dma_fence_unwrap_for_each(fence, cursor, head)                 \
+       for (fence = dma_fence_unwrap_first(head, cursor); fence;       \
+            fence = dma_fence_unwrap_next(cursor))
+
+#endif
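
A hedged sketch of the deep-dive iterator in use (example_count_fences() is
illustrative): chains, arrays, and arrays linked into a chain are all
flattened into one walk over the leaf fences:

#include <linux/dma-fence-unwrap.h>

/* Count every leaf fence reachable from @head, whether @head is a plain
 * fence, a dma_fence_chain, a dma_fence_array, or a mix of the two. */
static unsigned int example_count_fences(struct dma_fence *head)
{
	struct dma_fence_unwrap cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_fence_unwrap_for_each(fence, &cursor, head)
		count++;
	return count;
}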
index 18316087213384ea69cdbb6cb2b5d95204a0bb69..bbde95387a23af8daf91f0926069a2090c35bd0f 100644 (file)
@@ -275,7 +275,6 @@ enum positive_aop_returns {
        AOP_TRUNCATED_PAGE      = 0x80001,
 };
 
-#define AOP_FLAG_CONT_EXPAND           0x0001 /* called from cont_expand */
 #define AOP_FLAG_NOFS                  0x0002 /* used by filesystem to direct
                                                * helper code (eg buffer layer)
                                                * to clear GFP_FS from alloc */
@@ -338,28 +337,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
        return kiocb->ki_complete == NULL;
 }
 
-/*
- * "descriptor" for what we're up to with a read.
- * This allows us to use the same read code yet
- * have multiple different users of the data that
- * we read from a file.
- *
- * The simplest case just copies the data to user
- * mode.
- */
-typedef struct {
-       size_t written;
-       size_t count;
-       union {
-               char __user *buf;
-               void *data;
-       } arg;
-       int error;
-} read_descriptor_t;
-
-typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
-               unsigned long, unsigned long);
-
 struct address_space_operations {
        int (*writepage)(struct page *page, struct writeback_control *wbc);
        int (*readpage)(struct file *, struct page *);
@@ -370,12 +347,6 @@ struct address_space_operations {
        /* Mark a folio dirty.  Return true if this dirtied it */
        bool (*dirty_folio)(struct address_space *, struct folio *);
 
-       /*
-        * Reads in the requested pages. Unlike ->readpage(), this is
-        * PURELY used for read-ahead!.
-        */
-       int (*readpages)(struct file *filp, struct address_space *mapping,
-                       struct list_head *pages, unsigned nr_pages);
        void (*readahead)(struct readahead_control *);
 
        int (*write_begin)(struct file *, struct address_space *mapping,
@@ -3027,7 +2998,7 @@ extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
 extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
 extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
 extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
-extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
+ssize_t generic_perform_write(struct kiocb *, struct iov_iter *);
 
 ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
                rwf_t flags);
index d44ff747a657ef7806fab862cce51f245b99d344..e25539072463ba0dc43e872b79abc02b3316d48d 100644 (file)
@@ -456,6 +456,20 @@ int fscache_begin_read_operation(struct netfs_cache_resources *cres,
        return -ENOBUFS;
 }
 
+/**
+ * fscache_end_operation - End the read operation for the netfs lib
+ * @cres: The cache resources for the read operation
+ *
+ * Clean up the resources at the end of the read request.
+ */
+static inline void fscache_end_operation(struct netfs_cache_resources *cres)
+{
+       const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+
+       if (ops)
+               ops->end_operation(cres);
+}
+
 /**
  * fscache_read - Start a read from the cache.
  * @cres: The cache resources to use
@@ -559,7 +573,6 @@ int fscache_write(struct netfs_cache_resources *cres,
 
 /**
  * fscache_clear_page_bits - Clear the PG_fscache bits from a set of pages
- * @cookie: The cookie representing the cache object
  * @mapping: The netfs inode to use as the source
  * @start: The start position in @mapping
  * @len: The amount of data to unlock
@@ -568,8 +581,7 @@ int fscache_write(struct netfs_cache_resources *cres,
  * Clear the PG_fscache flag from a sequence of pages and wake up anyone who's
  * waiting.
  */
-static inline void fscache_clear_page_bits(struct fscache_cookie *cookie,
-                                          struct address_space *mapping,
+static inline void fscache_clear_page_bits(struct address_space *mapping,
                                           loff_t start, size_t len,
                                           bool caching)
 {
index b568b3c7d095ea76edf6923ce316ff064dcad9a3..a7afc800bd8d27a927cce6c1eb08680d828fde38 100644 (file)
@@ -221,7 +221,7 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work)
  *
  * This checks whether ->i_verity_info has been set.
  *
- * Filesystems call this from ->readpages() to check whether the pages need to
+ * Filesystems call this from ->readahead() to check whether the pages need to
  * be verified or not.  Don't use IS_VERITY() for this purpose; it's subject to
  * a race condition where the file is being read concurrently with
  * FS_IOC_ENABLE_VERITY completing.  (S_VERITY is set before ->i_verity_info.)
index ed8cf433a46a346a3f2b576997b9d4476c4723b4..4816b7e110472c06063dec6d67cac1c5419e2a6b 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/trace_recursion.h>
 #include <linux/trace_clock.h>
+#include <linux/jump_label.h>
 #include <linux/kallsyms.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
@@ -1018,7 +1019,20 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
 extern int register_ftrace_graph(struct fgraph_ops *ops);
 extern void unregister_ftrace_graph(struct fgraph_ops *ops);
 
-extern bool ftrace_graph_is_dead(void);
+/**
+ * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
+ *
+ * ftrace_graph_stop() is called when a severe error is detected in
+ * function graph tracing. This function is called by the critical
+ * paths of the function graph tracer to keep those paths from doing
+ * any more harm.
+ */
+DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);
+
+static inline bool ftrace_graph_is_dead(void)
+{
+       return static_branch_unlikely(&kill_ftrace_graph);
+}
+
 extern void ftrace_graph_stop(void);
 
 /* The current handlers in use */
index 0fa17fb85de5dcac00254860a317352082781892..3e3d36fc210982d5d23fae91408d6b602fc1517f 100644 (file)
@@ -264,9 +264,7 @@ struct vm_area_struct;
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (24 +                                         \
-                         3 * IS_ENABLED(CONFIG_KASAN_HW_TAGS) +        \
-                         IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /**
@@ -615,9 +613,11 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 #ifdef CONFIG_NUMA
 struct page *alloc_pages(gfp_t gfp, unsigned int order);
 struct folio *folio_alloc(gfp_t gfp, unsigned order);
-extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
+struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool hugepage);
+struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
+               unsigned long addr, bool hugepage);
 #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
        alloc_pages_vma(gfp_mask, order, vma, addr, true)
 #else
@@ -629,8 +629,10 @@ static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
 {
        return __folio_alloc_node(gfp, order, numa_node_id());
 }
-#define alloc_pages_vma(gfp_mask, order, vma, addr, false)\
+#define alloc_pages_vma(gfp_mask, order, vma, addr, hugepage) \
        alloc_pages(gfp_mask, order)
+#define vma_alloc_folio(gfp, order, vma, addr, hugepage)               \
+       folio_alloc(gfp, order)
 #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
        alloc_pages(gfp_mask, order)
 #endif
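
A hedged sketch of the new folio entry point (the anonymous-fault framing and
GFP choice are illustrative): on NUMA kernels vma_alloc_folio() honours the
VMA's memory policy, while the !CONFIG_NUMA macro above simply falls back to
folio_alloc():

#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate an order-0 folio for an anonymous page at @addr in @vma. */
static struct folio *example_alloc_anon_folio(struct vm_area_struct *vma,
					      unsigned long addr)
{
	return vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
}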
index c3aa8b330e1c67f57e92faab99e8a4c471f39a31..e71f6e1bfafed8f9ab90bd0f4368bd03272470e9 100644 (file)
@@ -688,7 +688,7 @@ void acpi_dev_remove_driver_gpios(struct acpi_device *adev);
 int devm_acpi_dev_add_driver_gpios(struct device *dev,
                                   const struct acpi_gpio_mapping *gpios);
 
-struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label);
+struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label);
 
 #else  /* CONFIG_GPIOLIB && CONFIG_ACPI */
 
@@ -705,6 +705,12 @@ static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
        return -ENXIO;
 }
 
+static inline struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin,
+                                                          char *label)
+{
+       return ERR_PTR(-ENOSYS);
+}
+
 #endif /* CONFIG_GPIOLIB && CONFIG_ACPI */
 
 
index b0728c8ad90ce9aad6cad7438c46e201670e419e..874aabd270c9bf7a7a649a2de9d37f05b42f77a8 100644 (file)
@@ -168,13 +168,16 @@ struct gpio_irq_chip {
 
        /**
         * @parent_handler_data:
+        *
+        * If @per_parent_data is false, @parent_handler_data is a single
+        * pointer used as the data associated with every parent interrupt.
+        *
         * @parent_handler_data_array:
         *
-        * Data associated, and passed to, the handler for the parent
-        * interrupt. Can either be a single pointer if @per_parent_data
-        * is false, or an array of @num_parents pointers otherwise.  If
-        * @per_parent_data is true, @parent_handler_data_array cannot be
-        * NULL.
+        * If @per_parent_data is true, @parent_handler_data_array is
+        * an array of @num_parents pointers, and is used to associate
+        * different data for each parent. This cannot be NULL if
+        * @per_parent_data is true.
         */
        union {
                void *parent_handler_data;
@@ -218,6 +221,15 @@ struct gpio_irq_chip {
         */
        bool per_parent_data;
 
+       /**
+        * @initialized:
+        *
+        * Flag to track the initialization of the GPIO chip irq members.
+        * It ensures the GPIO chip irq members are not used before they
+        * are initialized.
+        */
+       bool initialized;
+
        /**
         * @init_hw: optional routine to initialize hardware before
         * an IRQ chip will be added. This is quite useful when
index 0354b298d874908319d9d3b00b3fe9780a32e9c8..49790c1bd2c43e21615fda95cbf3febb22ba6da0 100644 (file)
@@ -475,6 +475,8 @@ static inline void input_set_events_per_packet(struct input_dev *dev, int n_even
 void input_alloc_absinfo(struct input_dev *dev);
 void input_set_abs_params(struct input_dev *dev, unsigned int axis,
                          int min, int max, int fuzz, int flat);
+void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
+                   const struct input_dev *src, unsigned int src_axis);
 
 #define INPUT_GENERATE_ABS_ACCESSORS(_suffix, _item)                   \
 static inline int input_abs_get_##_suffix(struct input_dev *dev,       \
diff --git a/include/linux/input/vivaldi-fmap.h b/include/linux/input/vivaldi-fmap.h
new file mode 100644 (file)
index 0000000..7e4b702
--- /dev/null
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VIVALDI_FMAP_H
+#define _VIVALDI_FMAP_H
+
+#include <linux/types.h>
+
+#define VIVALDI_MAX_FUNCTION_ROW_KEYS  24
+
+/**
+ * struct vivaldi_data - Function row map data for ChromeOS Vivaldi keyboards
+ * @function_row_physmap: An array of scancodes or their equivalent (HID usage
+ *                        codes, encoded rows/columns, etc) for the top
+ *                        row function keys, in an order from left to right
+ * @num_function_row_keys: The number of top row keys in a custom keyboard
+ *
+ * This structure is supposed to be used by ChromeOS keyboards using
+ * the Vivaldi keyboard function row design.
+ */
+struct vivaldi_data {
+       u32 function_row_physmap[VIVALDI_MAX_FUNCTION_ROW_KEYS];
+       unsigned int num_function_row_keys;
+};
+
+ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data,
+                                         char *buf);
+
+#endif /* _VIVALDI_FMAP_H */
index 08ba5995aa8bbe84cc8bd802303346aeb60f09cb..a890428bcc1a2351b0266c5274dbd5f329a37329 100644 (file)
 }                                      \
 )
 
-/**
- * lower_48_bits() - return bits 0-47 of a number
- * @n: the number we're accessing
- */
-static inline u64 lower_48_bits(u64 n)
-{
-       return n & ((1ull << 48) - 1);
-}
-
 /**
  * upper_32_bits - return bits 32-63 of a number
  * @n: the number we're accessing
index f49e64222628ae3a1a15df5c3a111c9f434b135c..726857a4b68054130800b7e22c6904ff324352c2 100644 (file)
@@ -204,6 +204,22 @@ static __always_inline __must_check bool kfence_free(void *addr)
  */
 bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
 
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+/**
+ * __kfence_obj_info() - fill kmem_obj_info struct
+ * @kpp: kmem_obj_info to be filled
+ * @object: the object
+ *
+ * Return:
+ * * false - not a KFENCE object
+ * * true - a KFENCE object; @kpp has been filled
+ *
+ * Copies information to @kpp for KFENCE objects.
+ */
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+#endif
+
 #else /* CONFIG_KFENCE */
 
 static inline bool is_kfence_address(const void *addr) { return false; }
@@ -221,6 +237,14 @@ static inline bool __must_check kfence_handle_page_fault(unsigned long addr, boo
        return false;
 }
 
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+       return false;
+}
+#endif
+
 #endif
 
 #endif /* _LINUX_KFENCE_H */
index c7b47399b36ae7eacb57270df504504fb9aeb4dd..57fb972fea05ba7e4be249565c55101a87cc826b 100644 (file)
@@ -120,7 +120,6 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
 struct kobj_type {
        void (*release)(struct kobject *kobj);
        const struct sysfs_ops *sysfs_ops;
-       struct attribute **default_attrs;       /* use default_groups instead */
        const struct attribute_group **default_groups;
        const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
        const void *(*namespace)(struct kobject *kobj);
index 9536ffa0473b9d1d1dad23862468bcd14b943ff1..3f9b22c4983a85704667a89e274c364326980574 100644 (file)
@@ -148,6 +148,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQUEST_MASK           GENMASK(7,0)
 #define KVM_REQUEST_NO_WAKEUP      BIT(8)
 #define KVM_REQUEST_WAIT           BIT(9)
+#define KVM_REQUEST_NO_ACTION      BIT(10)
 /*
  * Architecture-independent vcpu->requests bit members
  * Bits 4-7 are reserved for more arch-independent bits.
@@ -156,9 +157,18 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_VM_DEAD           (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_UNBLOCK           2
 #define KVM_REQ_UNHALT            3
-#define KVM_REQ_GPC_INVALIDATE    (5 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQUEST_ARCH_BASE     8
 
+/*
+ * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
+ * OUTSIDE_GUEST_MODE.  KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick"
+ * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing
+ * on.  A kick only guarantees that the vCPU is on its way out, e.g. a previous
+ * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
+ * guarantee the vCPU received an IPI and has actually exited guest mode.
+ */
+#define KVM_REQ_OUTSIDE_GUEST_MODE     (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+
 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
        BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
        (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
@@ -1221,27 +1231,27 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
  * @gpc:          struct gfn_to_pfn_cache object.
  * @vcpu:         vCPU to be used for marking pages dirty and to be woken on
  *                invalidation.
- * @guest_uses_pa: indicates that the resulting host physical PFN is used while
- *                @vcpu is IN_GUEST_MODE so invalidations should wake it.
- * @kernel_map:    requests a kernel virtual mapping (kmap / memremap).
+ * @usage:        indicates if the resulting host physical PFN is used while
+ *                the @vcpu is IN_GUEST_MODE (in which case invalidation of
+ *                the cache from MMU notifiers---but not for KVM memslot
+ *                changes!---will also force @vcpu to exit the guest and
+ *                refresh the cache); and/or if the PFN is used directly
+ *                by KVM (and thus needs a kernel virtual mapping).
  * @gpa:          guest physical address to map.
  * @len:          sanity check; the range being accessed must fit a single page.
- * @dirty:         mark the cache dirty immediately.
  *
  * @return:       0 for success.
  *                -EINVAL for a mapping which would cross a page boundary.
  *                 -EFAULT for an untranslatable guest physical address.
  *
  * This primes a gfn_to_pfn_cache and links it into the @kvm's list for
- * invalidations to be processed. Invalidation callbacks to @vcpu using
- * %KVM_REQ_GPC_INVALIDATE will occur only for MMU notifiers, not for KVM
- * memslot changes. Callers are required to use kvm_gfn_to_pfn_cache_check()
- * to ensure that the cache is valid before accessing the target page.
+ * invalidations to be processed.  Callers are required to use
+ * kvm_gfn_to_pfn_cache_check() to ensure that the cache is valid before
+ * accessing the target page.
  */
 int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-                             struct kvm_vcpu *vcpu, bool guest_uses_pa,
-                             bool kernel_map, gpa_t gpa, unsigned long len,
-                             bool dirty);
+                             struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+                             gpa_t gpa, unsigned long len);
 
 /**
  * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
@@ -1250,7 +1260,6 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
  * @gpc:          struct gfn_to_pfn_cache object.
  * @gpa:          current guest physical address to map.
  * @len:          sanity check; the range being accessed must fit a single page.
- * @dirty:         mark the cache dirty immediately.
  *
  * @return:       %true if the cache is still valid and the address matches.
  *                %false if the cache is not valid.
@@ -1272,7 +1281,6 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
  * @gpc:          struct gfn_to_pfn_cache object.
  * @gpa:          updated guest physical address to map.
  * @len:          sanity check; the range being accessed must fit a single page.
- * @dirty:         mark the cache dirty immediately.
  *
  * @return:       0 for success.
  *                -EINVAL for a mapping which would cross a page boundary.
@@ -1285,7 +1293,7 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
  * with the lock still held to permit access.
  */
 int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-                                gpa_t gpa, unsigned long len, bool dirty);
+                                gpa_t gpa, unsigned long len);
 
 /**
  * kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache.
@@ -1293,10 +1301,9 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
  * @kvm:          pointer to kvm instance.
  * @gpc:          struct gfn_to_pfn_cache object.
  *
- * This unmaps the referenced page and marks it dirty, if appropriate. The
- * cache is left in the invalid state but at least the mapping from GPA to
- * userspace HVA will remain cached and can be reused on a subsequent
- * refresh.
+ * This unmaps the referenced page. The cache is left in the invalid state
+ * but at least the mapping from GPA to userspace HVA will remain cached
+ * and can be reused on a subsequent refresh.
  */
 void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
 
@@ -1984,7 +1991,7 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 
 void kvm_arch_irq_routing_update(struct kvm *kvm);
 
-static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
 {
        /*
         * Ensure the rest of the request is published to kvm_check_request's
@@ -1994,6 +2001,19 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
        set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
 }
 
+static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+{
+       /*
+        * Requests that don't require vCPU action should never be logged in
+        * vcpu->requests.  The vCPU won't clear the request, so it will stay
+        * logged indefinitely and prevent the vCPU from entering the guest.
+        */
+       BUILD_BUG_ON(!__builtin_constant_p(req) ||
+                    (req & KVM_REQUEST_NO_ACTION));
+
+       __kvm_make_request(req, vcpu);
+}
+
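Illustrative only (KVM_REQ_EXAMPLE_NO_ACTION is a hypothetical request name):
a request flagged KVM_REQUEST_NO_ACTION must now go through the unchecked
helper, as the checked wrapper refuses it at compile time:

        __kvm_make_request(KVM_REQ_EXAMPLE_NO_ACTION, vcpu); /* OK: unchecked */
        kvm_make_request(KVM_REQ_EXAMPLE_NO_ACTION, vcpu);   /* BUILD_BUG_ON() fires */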
 static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
 {
        return READ_ONCE(vcpu->requests);
index dceac12c1ce571937a68cd90dc3181102183b59b..ac1ebb37a0ffd7ce5f0e37363602d784d10257ea 100644 (file)
@@ -18,6 +18,7 @@ struct kvm_memslots;
 
 enum kvm_mr_change;
 
+#include <linux/bits.h>
 #include <linux/types.h>
 #include <linux/spinlock_types.h>
 
@@ -46,6 +47,12 @@ typedef u64            hfn_t;
 
 typedef hfn_t kvm_pfn_t;
 
+enum pfn_cache_usage {
+       KVM_GUEST_USES_PFN = BIT(0),
+       KVM_HOST_USES_PFN  = BIT(1),
+       KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
+};
+
 struct gfn_to_hva_cache {
        u64 generation;
        gpa_t gpa;
@@ -64,11 +71,9 @@ struct gfn_to_pfn_cache {
        rwlock_t lock;
        void *khva;
        kvm_pfn_t pfn;
+       enum pfn_cache_usage usage;
        bool active;
        bool valid;
-       bool dirty;
-       bool kernel_map;
-       bool guest_uses_pa;
 };
 
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
index 6d635e8306d645a820b54fe91ee4a5c874b634e3..975e33b793a774ecc72580c46c0123da725273f1 100644 (file)
@@ -44,9 +44,9 @@ static inline void local_lock_debug_init(local_lock_t *l)
 }
 #else /* CONFIG_DEBUG_LOCK_ALLOC */
 # define LOCAL_LOCK_DEBUG_INIT(lockname)
-# define local_lock_acquire(__ll)  do { typecheck(local_lock_t *, __ll); } while (0)
-# define local_lock_release(__ll)  do { typecheck(local_lock_t *, __ll); } while (0)
-# define local_lock_debug_init(__ll)  do { typecheck(local_lock_t *, __ll); } while (0)
+static inline void local_lock_acquire(local_lock_t *l) { }
+static inline void local_lock_release(local_lock_t *l) { }
+static inline void local_lock_debug_init(local_lock_t *l) { }
 #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
 
 #define INIT_LOCAL_LOCK(lockname)      { LOCAL_LOCK_DEBUG_INIT(lockname) }
index 808bb4cee2300ecf21611f492cb1fcbf1e519717..b0da04fe087bb8cb32de8f62a4a52abbaa4d5b80 100644 (file)
@@ -86,6 +86,8 @@ struct cmos_rtc_board_info {
    /* 2 values for divider stage reset, others for "testing purposes only" */
 #  define RTC_DIV_RESET1       0x60
 #  define RTC_DIV_RESET2       0x70
+   /* In the AMD BKDG, bits 5 and 6 are reserved; bit 4 selects the dv0 bank */
+#  define RTC_AMD_BANK_SELECT  0x10
   /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
 # define RTC_RATE_SELECT       0x0F
 
index 71101d1ec825e9d4ba63f4aaae168209d8b724fa..de5c64bbdb725818faf590107f0ecde475152852 100644 (file)
@@ -175,7 +175,7 @@ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq);
 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd,
                int retries);
 
-int mmc_hw_reset(struct mmc_host *host);
+int mmc_hw_reset(struct mmc_card *card);
 int mmc_sw_reset(struct mmc_host *host);
 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card);
 
index 962b14d403e8fc4a8144ed0e02f2593883843ecd..46ffab808f037bfbd26a1fccf28cf06cd48953f2 100644 (file)
@@ -1397,13 +1397,16 @@ static inline unsigned long *section_to_usemap(struct mem_section *ms)
 
 static inline struct mem_section *__nr_to_section(unsigned long nr)
 {
+       unsigned long root = SECTION_NR_TO_ROOT(nr);
+
+       if (unlikely(root >= NR_SECTION_ROOTS))
+               return NULL;
+
 #ifdef CONFIG_SPARSEMEM_EXTREME
-       if (!mem_section)
+       if (!mem_section || !mem_section[root])
                return NULL;
 #endif
-       if (!mem_section[SECTION_NR_TO_ROOT(nr)])
-               return NULL;
-       return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
+       return &mem_section[root][nr & SECTION_ROOT_MASK];
 }
 extern size_t mem_section_usage_size(void);
 
index ba736b457a0683c6358495b738502b9f6b1335fb..12093f4db50c423c3fe8b8924d89392ab9567ada 100644 (file)
@@ -125,6 +125,25 @@ struct socket {
        struct socket_wq        wq;
 };
 
+/*
+ * "descriptor" for what we're up to with a read.
+ * This allows us to use the same read code yet
+ * have multiple different users of the data that
+ * we read from a file.
+ *
+ * The simplest case just copies the data to user
+ * mode.
+ */
+typedef struct {
+       size_t written;
+       size_t count;
+       union {
+               char __user *buf;
+               void *data;
+       } arg;
+       int error;
+} read_descriptor_t;
+
 struct vm_area_struct;
 struct page;
 struct sockaddr;
index 59e27a2b7bf04a28d98d987dedd46dc11d4b8bc8..b1fbe21650bb5ed1d8593d895ba3106869311074 100644 (file)
@@ -199,10 +199,10 @@ struct net_device_stats {
  * Try to fit them in a single cache line, for the sake of dev_get_stats().
  */
 struct net_device_core_stats {
-       local_t         rx_dropped;
-       local_t         tx_dropped;
-       local_t         rx_nohandler;
-} __aligned(4 * sizeof(local_t));
+       unsigned long   rx_dropped;
+       unsigned long   tx_dropped;
+       unsigned long   rx_nohandler;
+} __aligned(4 * sizeof(unsigned long));
 
 #include <linux/cache.h>
 #include <linux/skbuff.h>
@@ -3843,15 +3843,15 @@ static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
        return false;
 }
 
-struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev);
+struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev);
 
-static inline struct net_device_core_stats *dev_core_stats(struct net_device *dev)
+static inline struct net_device_core_stats __percpu *dev_core_stats(struct net_device *dev)
 {
        /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
        struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);
 
        if (likely(p))
-               return this_cpu_ptr(p);
+               return p;
 
        return netdev_core_stats_alloc(dev);
 }
@@ -3859,14 +3859,11 @@ static inline struct net_device_core_stats *dev_core_stats(struct net_device *de
 #define DEV_CORE_STATS_INC(FIELD)                                              \
 static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev)                \
 {                                                                              \
-       struct net_device_core_stats *p;                                        \
+       struct net_device_core_stats __percpu *p;                               \
                                                                                \
-       preempt_disable();                                                      \
        p = dev_core_stats(dev);                                                \
-                                                                               \
        if (p)                                                                  \
-               local_inc(&p->FIELD);                                           \
-       preempt_enable();                                                       \
+               this_cpu_inc(p->FIELD);                                         \
 }
 DEV_CORE_STATS_INC(rx_dropped)
 DEV_CORE_STATS_INC(tx_dropped)
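A hedged caller-side sketch (the driver function is hypothetical): since the
counters are now plain per-CPU unsigned longs bumped via this_cpu_inc(), the
generated helpers are preemption-safe without the explicit preempt_disable()/
preempt_enable() pair that was removed above:

static void example_rx(struct net_device *dev, struct sk_buff *skb)
{
        if (unlikely(!netif_running(dev))) {
                dev_core_stats_rx_dropped_inc(dev);  /* per-CPU, preempt-safe */
                kfree_skb(skb);
                return;
        }
        /* ... normal receive path ... */
}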
index 614f22213e21709ea6eb4f6cdf424e2994047ed7..c7bf1eaf51d5aaa71c60a52006de277b94177a1f 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/fs.h>
 #include <linux/pagemap.h>
 
+enum netfs_sreq_ref_trace;
+
 /*
  * Overload PG_private_2 to give us PG_fscache - this is used to indicate that
  * a page is currently backed by a local disk cache
@@ -106,7 +108,7 @@ static inline int wait_on_page_fscache_killable(struct page *page)
        return folio_wait_private_2_killable(page_folio(page));
 }
 
-enum netfs_read_source {
+enum netfs_io_source {
        NETFS_FILL_WITH_ZEROES,
        NETFS_DOWNLOAD_FROM_SERVER,
        NETFS_READ_FROM_CACHE,
@@ -116,6 +118,17 @@ enum netfs_read_source {
 typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
                                      bool was_async);
 
+/*
+ * Per-inode description.  This must be directly after the inode struct.
+ */
+struct netfs_i_context {
+       const struct netfs_request_ops *ops;
+#if IS_ENABLED(CONFIG_FSCACHE)
+       struct fscache_cookie   *cache;
+#endif
+       loff_t                  remote_i_size;  /* Size of the remote file */
+};
+
 /*
  * Resources required to do operations on a cache.
  */
@@ -130,69 +143,75 @@ struct netfs_cache_resources {
 /*
  * Descriptor for a single component subrequest.
  */
-struct netfs_read_subrequest {
-       struct netfs_read_request *rreq;        /* Supervising read request */
+struct netfs_io_subrequest {
+       struct netfs_io_request *rreq;          /* Supervising I/O request */
        struct list_head        rreq_link;      /* Link in rreq->subrequests */
        loff_t                  start;          /* Where to start the I/O */
        size_t                  len;            /* Size of the I/O */
        size_t                  transferred;    /* Amount of data transferred */
-       refcount_t              usage;
+       refcount_t              ref;
        short                   error;          /* 0 or error that occurred */
        unsigned short          debug_index;    /* Index in list (for debugging output) */
-       enum netfs_read_source  source;         /* Where to read from */
+       enum netfs_io_source    source;         /* Where to read from/write to */
        unsigned long           flags;
-#define NETFS_SREQ_WRITE_TO_CACHE      0       /* Set if should write to cache */
+#define NETFS_SREQ_COPY_TO_CACHE       0       /* Set if should copy the data to the cache */
 #define NETFS_SREQ_CLEAR_TAIL          1       /* Set if the rest of the read should be cleared */
-#define NETFS_SREQ_SHORT_READ          2       /* Set if there was a short read from the cache */
+#define NETFS_SREQ_SHORT_IO            2       /* Set if the I/O was short */
 #define NETFS_SREQ_SEEK_DATA_READ      3       /* Set if ->read() should SEEK_DATA first */
 #define NETFS_SREQ_NO_PROGRESS         4       /* Set if we didn't manage to read any data */
 };
 
+enum netfs_io_origin {
+       NETFS_READAHEAD,                /* This read was triggered by readahead */
+       NETFS_READPAGE,                 /* This read is a synchronous read */
+       NETFS_READ_FOR_WRITE,           /* This read is to prepare a write */
+} __mode(byte);
+
 /*
- * Descriptor for a read helper request.  This is used to make multiple I/O
- * requests on a variety of sources and then stitch the result together.
+ * Descriptor for an I/O helper request.  This is used to make multiple I/O
+ * operations to a variety of data stores and then stitch the result together.
  */
-struct netfs_read_request {
+struct netfs_io_request {
        struct work_struct      work;
        struct inode            *inode;         /* The file being accessed */
        struct address_space    *mapping;       /* The mapping being accessed */
        struct netfs_cache_resources cache_resources;
-       struct list_head        subrequests;    /* Requests to fetch I/O from disk or net */
+       struct list_head        subrequests;    /* Contributory I/O operations */
        void                    *netfs_priv;    /* Private data for the netfs */
        unsigned int            debug_id;
-       atomic_t                nr_rd_ops;      /* Number of read ops in progress */
-       atomic_t                nr_wr_ops;      /* Number of write ops in progress */
+       atomic_t                nr_outstanding; /* Number of ops in progress */
+       atomic_t                nr_copy_ops;    /* Number of copy-to-cache ops in progress */
        size_t                  submitted;      /* Amount submitted for I/O so far */
        size_t                  len;            /* Length of the request */
        short                   error;          /* 0 or error that occurred */
+       enum netfs_io_origin    origin;         /* Origin of the request */
        loff_t                  i_size;         /* Size of the file */
        loff_t                  start;          /* Start position */
        pgoff_t                 no_unlock_folio; /* Don't unlock this folio after read */
-       refcount_t              usage;
+       refcount_t              ref;
        unsigned long           flags;
 #define NETFS_RREQ_INCOMPLETE_IO       0       /* Some ioreqs terminated short or with error */
-#define NETFS_RREQ_WRITE_TO_CACHE      1       /* Need to write to the cache */
+#define NETFS_RREQ_COPY_TO_CACHE       1       /* Need to write to the cache */
 #define NETFS_RREQ_NO_UNLOCK_FOLIO     2       /* Don't unlock no_unlock_folio on completion */
 #define NETFS_RREQ_DONT_UNLOCK_FOLIOS  3       /* Don't unlock the folios on completion */
 #define NETFS_RREQ_FAILED              4       /* The request failed */
 #define NETFS_RREQ_IN_PROGRESS         5       /* Unlocked when the request completes */
-       const struct netfs_read_request_ops *netfs_ops;
+       const struct netfs_request_ops *netfs_ops;
 };
 
 /*
  * Operations the network filesystem can/must provide to the helpers.
  */
-struct netfs_read_request_ops {
-       bool (*is_cache_enabled)(struct inode *inode);
-       void (*init_rreq)(struct netfs_read_request *rreq, struct file *file);
-       int (*begin_cache_operation)(struct netfs_read_request *rreq);
-       void (*expand_readahead)(struct netfs_read_request *rreq);
-       bool (*clamp_length)(struct netfs_read_subrequest *subreq);
-       void (*issue_op)(struct netfs_read_subrequest *subreq);
-       bool (*is_still_valid)(struct netfs_read_request *rreq);
+struct netfs_request_ops {
+       int (*init_request)(struct netfs_io_request *rreq, struct file *file);
+       int (*begin_cache_operation)(struct netfs_io_request *rreq);
+       void (*expand_readahead)(struct netfs_io_request *rreq);
+       bool (*clamp_length)(struct netfs_io_subrequest *subreq);
+       void (*issue_read)(struct netfs_io_subrequest *subreq);
+       bool (*is_still_valid)(struct netfs_io_request *rreq);
        int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
                                 struct folio *folio, void **_fsdata);
-       void (*done)(struct netfs_read_request *rreq);
+       void (*done)(struct netfs_io_request *rreq);
        void (*cleanup)(struct address_space *mapping, void *netfs_priv);
 };
 
@@ -235,7 +254,7 @@ struct netfs_cache_ops {
        /* Prepare a read operation, shortening it to a cached/uncached
         * boundary as appropriate.
         */
-       enum netfs_read_source (*prepare_read)(struct netfs_read_subrequest *subreq,
+       enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
                                               loff_t i_size);
 
        /* Prepare a write operation, working out what part of the write we can
@@ -254,20 +273,89 @@ struct netfs_cache_ops {
 };
 
 struct readahead_control;
-extern void netfs_readahead(struct readahead_control *,
-                           const struct netfs_read_request_ops *,
-                           void *);
-extern int netfs_readpage(struct file *,
-                         struct folio *,
-                         const struct netfs_read_request_ops *,
-                         void *);
+extern void netfs_readahead(struct readahead_control *);
+extern int netfs_readpage(struct file *, struct page *);
 extern int netfs_write_begin(struct file *, struct address_space *,
                             loff_t, unsigned int, unsigned int, struct folio **,
-                            void **,
-                            const struct netfs_read_request_ops *,
-                            void *);
+                            void **);
 
-extern void netfs_subreq_terminated(struct netfs_read_subrequest *, ssize_t, bool);
+extern void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
+extern void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
+                                enum netfs_sreq_ref_trace what);
+extern void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
+                                bool was_async, enum netfs_sreq_ref_trace what);
 extern void netfs_stats_show(struct seq_file *);
 
+/**
+ * netfs_i_context - Get the netfs inode context from the inode
+ * @inode: The inode to query
+ *
+ * Get the netfs lib inode context from the network filesystem's inode.  The
+ * context struct is expected to directly follow on from the VFS inode struct.
+ */
+static inline struct netfs_i_context *netfs_i_context(struct inode *inode)
+{
+       return (struct netfs_i_context *)(inode + 1);
+}
+
+/**
+ * netfs_inode - Get the netfs inode from the inode context
+ * @ctx: The context to query
+ *
+ * Get the netfs inode from the netfs library's inode context.  The VFS inode
+ * is expected to directly precede the context struct.
+ */
+static inline struct inode *netfs_inode(struct netfs_i_context *ctx)
+{
+       return ((struct inode *)ctx) - 1;
+}
+
+/**
+ * netfs_i_context_init - Initialise a netfs lib context
+ * @inode: The inode with which the context is associated
+ * @ops: The netfs's operations list
+ *
+ * Initialise the netfs library context struct.  This is expected to follow on
+ * directly from the VFS inode struct.
+ */
+static inline void netfs_i_context_init(struct inode *inode,
+                                       const struct netfs_request_ops *ops)
+{
+       struct netfs_i_context *ctx = netfs_i_context(inode);
+
+       memset(ctx, 0, sizeof(*ctx));
+       ctx->ops = ops;
+       ctx->remote_i_size = i_size_read(inode);
+}
+
+/**
+ * netfs_resize_file - Note that a file got resized
+ * @inode: The inode being resized
+ * @new_i_size: The new file size
+ *
+ * Inform the netfs lib that a file got resized so that it can adjust its state.
+ */
+static inline void netfs_resize_file(struct inode *inode, loff_t new_i_size)
+{
+       struct netfs_i_context *ctx = netfs_i_context(inode);
+
+       ctx->remote_i_size = new_i_size;
+}
+
+/**
+ * netfs_i_cookie - Get the cache cookie from the inode
+ * @inode: The inode to query
+ *
+ * Get the caching cookie (if enabled) from the network filesystem's inode.
+ */
+static inline struct fscache_cookie *netfs_i_cookie(struct inode *inode)
+{
+#if IS_ENABLED(CONFIG_FSCACHE)
+       struct netfs_i_context *ctx = netfs_i_context(inode);
+       return ctx->cache;
+#else
+       return NULL;
+#endif
+}
+
 #endif /* _LINUX_NETFS_H */
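To make the layout contract above concrete, a sketch of how a filesystem might
embed the context (the myfs_* names are hypothetical): struct netfs_i_context
must sit immediately after the VFS inode for the pointer arithmetic in
netfs_i_context()/netfs_inode() to work:

struct myfs_inode {
        struct inode            vfs_inode;      /* must come first */
        struct netfs_i_context  netfs_ctx;      /* must directly follow */
        /* ... filesystem-private fields ... */
};

static void myfs_init_inode(struct inode *inode)
{
        /* myfs_req_ops is assumed to be this fs's netfs_request_ops. */
        netfs_i_context_init(inode, &myfs_req_ops);
}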
index 49ba486aea5fd831f707ed3bc5153814bbe394c0..2863e5a69c6abdd208284c0238d135f297037b9f 100644 (file)
@@ -1694,6 +1694,7 @@ struct nfs_unlinkdata {
 struct nfs_renamedata {
        struct nfs_renameargs   args;
        struct nfs_renameres    res;
+       struct rpc_task         task;
        const struct cred       *cred;
        struct inode            *old_dir;
        struct dentry           *old_dentry;
index 4f44f83817a9117c70a7fbe845276dd62f0bf1c2..f626a445d1a872647c27e6c09139dca1d99af676 100644 (file)
@@ -346,6 +346,7 @@ enum {
        NVME_CTRL_ONCS_TIMESTAMP                = 1 << 6,
        NVME_CTRL_VWC_PRESENT                   = 1 << 0,
        NVME_CTRL_OACS_SEC_SUPP                 = 1 << 0,
+       NVME_CTRL_OACS_NS_MNGT_SUPP             = 1 << 3,
        NVME_CTRL_OACS_DIRECTIVES               = 1 << 5,
        NVME_CTRL_OACS_DBBUF_SUPP               = 1 << 8,
        NVME_CTRL_LPA_CMD_EFFECTS_LOG           = 1 << 1,
index a8d0b327b066a35997fbca1cd26a02c53e18a1f8..993994cd943a0db8a84b374cbfd791e5a61c0dd8 100644 (file)
@@ -752,8 +752,6 @@ struct page *read_cache_page(struct address_space *, pgoff_t index,
                filler_t *filler, void *data);
 extern struct page * read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
-extern int read_cache_pages(struct address_space *mapping,
-               struct list_head *pages, filler_t *filler, void *data);
 
 static inline struct page *read_mapping_page(struct address_space *mapping,
                                pgoff_t index, struct file *file)
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
deleted file mode 100644 (file)
index 249d4d7..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* include this file if the platform implements the dma_ DMA Mapping API
- * and wants to provide the pci_ DMA Mapping API in terms of it */
-
-#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
-#define _ASM_GENERIC_PCI_DMA_COMPAT_H
-
-#include <linux/dma-mapping.h>
-
-/* This defines the direction arg to the DMA mapping routines. */
-#define PCI_DMA_BIDIRECTIONAL  DMA_BIDIRECTIONAL
-#define PCI_DMA_TODEVICE       DMA_TO_DEVICE
-#define PCI_DMA_FROMDEVICE     DMA_FROM_DEVICE
-#define PCI_DMA_NONE           DMA_NONE
-
-static inline void *
-pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
-                    dma_addr_t *dma_handle)
-{
-       return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
-}
-
-static inline void *
-pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
-                     dma_addr_t *dma_handle)
-{
-       return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
-}
-
-static inline void
-pci_free_consistent(struct pci_dev *hwdev, size_t size,
-                   void *vaddr, dma_addr_t dma_handle)
-{
-       dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle);
-}
-
-static inline dma_addr_t
-pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
-{
-       return dma_map_single(&hwdev->dev, ptr, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
-                size_t size, int direction)
-{
-       dma_unmap_single(&hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
-}
-
-static inline dma_addr_t
-pci_map_page(struct pci_dev *hwdev, struct page *page,
-            unsigned long offset, size_t size, int direction)
-{
-       return dma_map_page(&hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
-              size_t size, int direction)
-{
-       dma_unmap_page(&hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
-}
-
-static inline int
-pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-          int nents, int direction)
-{
-       return dma_map_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-            int nents, int direction)
-{
-       dma_unmap_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
-                   size_t size, int direction)
-{
-       dma_sync_single_for_cpu(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
-                   size_t size, int direction)
-{
-       dma_sync_single_for_device(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
-               int nelems, int direction)
-{
-       dma_sync_sg_for_cpu(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
-               int nelems, int direction)
-{
-       dma_sync_sg_for_device(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
-}
-
-static inline int
-pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
-{
-       return dma_mapping_error(&pdev->dev, dma_addr);
-}
-
-#ifdef CONFIG_PCI
-static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{
-       return dma_set_mask(&dev->dev, mask);
-}
-
-static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{
-       return dma_set_coherent_mask(&dev->dev, mask);
-}
-#else
-static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{ return -EIO; }
-static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{ return -EIO; }
-#endif
-
-#endif
index b957eeb89c7a883bfbf487aa4d956d737f365082..60adf42460abbb340e4e0810ac4a68a953274f9c 100644 (file)
@@ -2473,8 +2473,7 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
 void pci_uevent_ers(struct pci_dev *pdev, enum  pci_ers_result err_type);
 #endif
 
-/* Provide the legacy pci_dma_* API */
-#include <linux/pci-dma-compat.h>
+#include <linux/dma-mapping.h>
 
 #define pci_printk(level, pdev, fmt, arg...) \
        dev_printk(level, &(pdev)->dev, fmt, ##arg)
index 060e8d2031814404175c8635c9a6184ed2d7e885..1766e1de695600968b9501c7d51d6a4db00b53a3 100644 (file)
@@ -34,15 +34,19 @@ posix_acl_xattr_count(size_t size)
 
 #ifdef CONFIG_FS_POSIX_ACL
 void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
+                                  struct inode *inode,
                                   void *value, size_t size);
 void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
+                                  struct inode *inode,
                                 void *value, size_t size);
 #else
 static inline void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
+                                                struct inode *inode,
                                                 void *value, size_t size)
 {
 }
 static inline void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
+                                              struct inode *inode,
                                               void *value, size_t size)
 {
 }
index 47fd1c2d3a579f019a2e3c5af39bbf229d1d3908..1fd9c6a21ebe85d8474867ba344c3451eb99bc99 100644 (file)
@@ -110,8 +110,6 @@ struct rtc_device {
        struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
        int pie_enabled;
        struct work_struct irqwork;
-       /* Some hardware can't support UIE mode */
-       int uie_unsupported;
 
        /*
         * This offset specifies the update timing of the RTC.
index 67ee9d20cc5aa76aaefaab949010e4afc67e76e2..5a41c3bbcbe3821ebc854976e3bfeea685689ee9 100644 (file)
@@ -46,7 +46,6 @@ struct ds1685_priv {
        u32 regstep;
        int irq_num;
        bool bcd_mode;
-       bool no_irq;
        u8 (*read)(struct ds1685_priv *, int);
        void (*write)(struct ds1685_priv *, int, u8);
        void (*prepare_poweroff)(void);
index dffeb8281c2d9de474f777f577f02175122bf42e..8f5a86e210b90c43c5246eaa121b2048d0351066 100644 (file)
@@ -174,7 +174,7 @@ static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
 static inline void sbitmap_free(struct sbitmap *sb)
 {
        free_percpu(sb->alloc_hint);
-       kfree(sb->map);
+       kvfree(sb->map);
        sb->map = NULL;
 }
 
index 4a6fdd2a679fe9a4f14f3fcac36714c090a15719..d5e3c00b74e12546c44faad9e00f85a8501ccf98 100644 (file)
@@ -1090,9 +1090,6 @@ struct task_struct {
        /* Restored if set_restore_sigmask() was used: */
        sigset_t                        saved_sigmask;
        struct sigpending               pending;
-#ifdef CONFIG_RT_DELAYED_SIGNALS
-       struct kernel_siginfo           forced_info;
-#endif
        unsigned long                   sas_ss_sp;
        size_t                          sas_ss_size;
        unsigned int                    sas_ss_flags;
index 88cc16444b43125e5fbd274868013fe29869e465..60820ab511d224f3bb655b0a43459591eae6821e 100644 (file)
@@ -162,6 +162,7 @@ int seq_dentry(struct seq_file *, struct dentry *, const char *);
 int seq_path_root(struct seq_file *m, const struct path *path,
                  const struct path *root, const char *esc);
 
+void *single_start(struct seq_file *, loff_t *);
 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
 int single_release(struct inode *, struct file *);
index 3e56a9751c062804b7f5f3db993144c0d4438a65..df53bed9d71f1de74af1410515ab91104b908909 100644 (file)
@@ -180,13 +180,13 @@ extern int static_call_text_reserved(void *start, void *end);
 
 extern long __static_call_return0(void);
 
-#define __DEFINE_STATIC_CALL(name, _func, _func_init)                  \
+#define DEFINE_STATIC_CALL(name, _func)                                        \
        DECLARE_STATIC_CALL(name, _func);                               \
        struct static_call_key STATIC_CALL_KEY(name) = {                \
-               .func = _func_init,                                     \
+               .func = _func,                                          \
                .type = 1,                                              \
        };                                                              \
-       ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init)
+       ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
 
 #define DEFINE_STATIC_CALL_NULL(name, _func)                           \
        DECLARE_STATIC_CALL(name, _func);                               \
@@ -196,6 +196,14 @@ extern long __static_call_return0(void);
        };                                                              \
        ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
 
+#define DEFINE_STATIC_CALL_RET0(name, _func)                           \
+       DECLARE_STATIC_CALL(name, _func);                               \
+       struct static_call_key STATIC_CALL_KEY(name) = {                \
+               .func = __static_call_return0,                          \
+               .type = 1,                                              \
+       };                                                              \
+       ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
+
 #define static_call_cond(name) (void)__static_call(name)
 
 #define EXPORT_STATIC_CALL(name)                                       \
@@ -217,12 +225,12 @@ extern long __static_call_return0(void);
 
 static inline int static_call_init(void) { return 0; }
 
-#define __DEFINE_STATIC_CALL(name, _func, _func_init)                  \
+#define DEFINE_STATIC_CALL(name, _func)                                        \
        DECLARE_STATIC_CALL(name, _func);                               \
        struct static_call_key STATIC_CALL_KEY(name) = {                \
-               .func = _func_init,                                     \
+               .func = _func,                                          \
        };                                                              \
-       ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init)
+       ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
 
 #define DEFINE_STATIC_CALL_NULL(name, _func)                           \
        DECLARE_STATIC_CALL(name, _func);                               \
@@ -231,6 +239,12 @@ static inline int static_call_init(void) { return 0; }
        };                                                              \
        ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
 
+#define DEFINE_STATIC_CALL_RET0(name, _func)                           \
+       DECLARE_STATIC_CALL(name, _func);                               \
+       struct static_call_key STATIC_CALL_KEY(name) = {                \
+               .func = __static_call_return0,                          \
+       };                                                              \
+       ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
 
 #define static_call_cond(name) (void)__static_call(name)
 
@@ -248,10 +262,7 @@ static inline int static_call_text_reserved(void *start, void *end)
        return 0;
 }
 
-static inline long __static_call_return0(void)
-{
-       return 0;
-}
+extern long __static_call_return0(void);
 
 #define EXPORT_STATIC_CALL(name)                                       \
        EXPORT_SYMBOL(STATIC_CALL_KEY(name));                           \
@@ -281,11 +292,14 @@ static inline long __static_call_return0(void)
                .func = _func_init,                                     \
        }
 
+#define DEFINE_STATIC_CALL(name, _func)                                        \
+       __DEFINE_STATIC_CALL(name, _func, _func)
+
 #define DEFINE_STATIC_CALL_NULL(name, _func)                           \
-       DECLARE_STATIC_CALL(name, _func);                               \
-       struct static_call_key STATIC_CALL_KEY(name) = {                \
-               .func = NULL,                                           \
-       }
+       __DEFINE_STATIC_CALL(name, _func, NULL)
+
+#define DEFINE_STATIC_CALL_RET0(name, _func)                           \
+       __DEFINE_STATIC_CALL(name, _func, __static_call_return0)
 
 static inline void __static_call_nop(void) { }
 
@@ -327,10 +341,4 @@ static inline int static_call_text_reserved(void *start, void *end)
 
 #endif /* CONFIG_HAVE_STATIC_CALL */
 
-#define DEFINE_STATIC_CALL(name, _func)                                        \
-       __DEFINE_STATIC_CALL(name, _func, _func)
-
-#define DEFINE_STATIC_CALL_RET0(name, _func)                           \
-       __DEFINE_STATIC_CALL(name, _func, __static_call_return0)
-
 #endif /* _LINUX_STATIC_CALL_H */
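A hedged usage sketch for the new DEFINE_STATIC_CALL_RET0() (my_hook and its
helpers are hypothetical): the call site returns 0 until a real implementation
is patched in, and behaves the same across all three backends above:

static int my_hook_impl(int arg)
{
        return arg * 2;
}

DEFINE_STATIC_CALL_RET0(my_hook, my_hook_impl); /* starts out returning 0 */

static int use_hook(void)
{
        return static_call(my_hook)(21);        /* 0 until updated, then 42 */
}

static void install_hook(void)
{
        static_call_update(my_hook, &my_hook_impl);
}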
index a5dda4987e8ba6e014a2a60d4c97b707febdea36..217711fc9cace111ace19e531537b8ab5ab3c41d 100644 (file)
@@ -395,6 +395,7 @@ struct svc_deferred_req {
        size_t                  addrlen;
        struct sockaddr_storage daddr;  /* where reply must come from */
        size_t                  daddrlen;
+       void                    *xprt_ctxt;
        struct cache_deferred_req handle;
        size_t                  xprt_hlen;
        int                     argslen;
index 45a9530d383999b01606912b0faddde7f0fe069f..522bbf937957132e121ae9e175917e34cde9bc6e 100644 (file)
@@ -144,7 +144,7 @@ struct rpc_xprt_ops {
        unsigned short  (*get_srcport)(struct rpc_xprt *xprt);
        int             (*buf_alloc)(struct rpc_task *task);
        void            (*buf_free)(struct rpc_task *task);
-       void            (*prepare_request)(struct rpc_rqst *req);
+       int             (*prepare_request)(struct rpc_rqst *req);
        int             (*send_request)(struct rpc_rqst *req);
        void            (*wait_for_reply_request)(struct rpc_task *task);
        void            (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
@@ -358,10 +358,9 @@ int                        xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
 void                   xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
 void                   xprt_free_slot(struct rpc_xprt *xprt,
                                       struct rpc_rqst *req);
-void                   xprt_request_prepare(struct rpc_rqst *req);
 bool                   xprt_prepare_transmit(struct rpc_task *task);
 void                   xprt_request_enqueue_transmit(struct rpc_task *task);
-void                   xprt_request_enqueue_receive(struct rpc_task *task);
+int                    xprt_request_enqueue_receive(struct rpc_task *task);
 void                   xprt_request_wait_receive(struct rpc_task *task);
 void                   xprt_request_dequeue_xprt(struct rpc_task *task);
 bool                   xprt_request_need_retransmit(struct rpc_task *task);
index a4b1af581f69ec055fbc0828765fad10bfd62a8e..248f4ac9564258524cd2ea897252023718c415fe 100644 (file)
@@ -59,6 +59,15 @@ struct crc64_pi_tuple {
        __u8   ref_tag[6];
 };
 
+/**
+ * lower_48_bits() - return bits 0-47 of a number
+ * @n: the number we're accessing
+ */
+static inline u64 lower_48_bits(u64 n)
+{
+       return n & ((1ull << 48) - 1);
+}
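For concreteness, the mask is (1ull << 48) - 1 == 0x0000ffffffffffff, so with
an illustrative value:

        /* lower_48_bits(0xabcd123456789abcULL) == 0x0000123456789abcULL */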
+
 static inline u64 ext_pi_ref_tag(struct request *rq)
 {
        unsigned int shift = ilog2(queue_logical_block_size(rq->q));
index 059b18eb1f1fab1c3af72b9369273a2782ba641d..5745c90c880054ac2a734d43142f8aa4d81431e5 100644 (file)
@@ -75,7 +75,7 @@
  * By default we use get_cycles() for this purpose, but individual
  * architectures may override this in their asm/timex.h header file.
  */
-#define random_get_entropy()   get_cycles()
+#define random_get_entropy()   ((unsigned long)get_cycles())
 #endif
 
 /*
similarity index 58%
rename from include/uapi/linux/user_events.h
rename to include/linux/user_events.h
index e570840571e1e024db016b64436118c590eaedd8..736e0560346386820886e5073e29dcea772a576d 100644 (file)
@@ -32,9 +32,6 @@
 /* Create dynamic location entry within a 32-bit value */
 #define DYN_LOC(offset, size) ((size) << 16 | (offset))
 
-/* Use raw iterator for attached BPF program(s), no affect on ftrace/perf */
-#define FLAG_BPF_ITER (1 << 0)
-
 /*
  * Describes an event registration and stores the results of the registration.
  * This structure is passed to the DIAG_IOCSREG ioctl; callers at a minimum
@@ -63,54 +60,4 @@ struct user_reg {
 /* Requests to delete a user_event */
 #define DIAG_IOCSDEL _IOW(DIAG_IOC_MAGIC, 1, char*)
 
-/* Data type that was passed to the BPF program */
-enum {
-       /* Data resides in kernel space */
-       USER_BPF_DATA_KERNEL,
-
-       /* Data resides in user space */
-       USER_BPF_DATA_USER,
-
-       /* Data is a pointer to a user_bpf_iter structure */
-       USER_BPF_DATA_ITER,
-};
-
-/*
- * Describes an iovec iterator that BPF programs can use to access data for
- * a given user_event write() / writev() call.
- */
-struct user_bpf_iter {
-
-       /* Offset of the data within the first iovec */
-       __u32 iov_offset;
-
-       /* Number of iovec structures */
-       __u32 nr_segs;
-
-       /* Pointer to iovec structures */
-       const struct iovec *iov;
-};
-
-/* Context that BPF programs receive when attached to a user_event */
-struct user_bpf_context {
-
-       /* Data type being passed (see union below) */
-       __u32 data_type;
-
-       /* Length of the data */
-       __u32 data_len;
-
-       /* Pointer to data, varies by data type */
-       union {
-               /* Kernel data (data_type == USER_BPF_DATA_KERNEL) */
-               void *kdata;
-
-               /* User data (data_type == USER_BPF_DATA_USER) */
-               void *udata;
-
-               /* Direct iovec (data_type == USER_BPF_DATA_ITER) */
-               struct user_bpf_iter *iter;
-       };
-};
-
 #endif /* _UAPI_LINUX_USER_EVENTS_H */
index 721089bb4c8490b685ec356f35cf66e7b60f93e0..8943a209202ea4844de46c5cc96d11a694bf7e40 100644 (file)
@@ -83,7 +83,7 @@ struct vdpa_device {
        unsigned int index;
        bool features_valid;
        bool use_va;
-       int nvqs;
+       u32 nvqs;
        struct vdpa_mgmt_dev *mdev;
 };
 
@@ -207,7 +207,8 @@ struct vdpa_map_file {
  * @reset:                     Reset device
  *                             @vdev: vdpa device
  *                             Returns integer: success (0) or error (< 0)
- * @get_config_size:           Get the size of the configuration space
+ * @get_config_size:           Get the size of the configuration space, including
+ *                             fields that are conditional on feature bits.
  *                             @vdev: vdpa device
  *                             Returns size_t: configuration size
  * @get_config:                        Read from device specific configuration space
@@ -337,10 +338,10 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
                                       dev_struct, member)), name, use_va), \
                                       dev_struct, member)
 
-int vdpa_register_device(struct vdpa_device *vdev, int nvqs);
+int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
 void vdpa_unregister_device(struct vdpa_device *vdev);
 
-int _vdpa_register_device(struct vdpa_device *vdev, int nvqs);
+int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
 void _vdpa_unregister_device(struct vdpa_device *vdev);
 
 /**
index 74a4a0f17b28bd8768e51bfd4f5ce5eed50a4ed3..48f2dd3c568c8334c8591c9dde23d3f4704949f8 100644 (file)
@@ -133,6 +133,8 @@ struct vfio_pci_core_device {
        struct mutex            ioeventfds_lock;
        struct list_head        ioeventfds_list;
        struct vfio_pci_vf_token        *vf_token;
+       struct list_head                sriov_pfs_item;
+       struct vfio_pci_core_device     *sriov_pf_core_dev;
        struct notifier_block   nb;
        struct mutex            vma_lock;
        struct list_head        vma_list;
index dafdc7f48c01b08a84574543af5ff9613b6ab57c..b341dd62aa4da9843f2af1d3132c7132dcc48341 100644 (file)
@@ -23,8 +23,6 @@ struct virtio_shm_region {
  *       any of @get/@set, @get_status/@set_status, or @get_features/
  *       @finalize_features are NOT safe to be called from an atomic
  *       context.
- * @enable_cbs: enable the callbacks
- *      vdev: the virtio_device
  * @get: read the value of a configuration field
  *     vdev: the virtio_device
  *     offset: the offset of the configuration field
@@ -78,7 +76,6 @@ struct virtio_shm_region {
  */
 typedef void vq_callback_t(struct virtqueue *);
 struct virtio_config_ops {
-       void (*enable_cbs)(struct virtio_device *vdev);
        void (*get)(struct virtio_device *vdev, unsigned offset,
                    void *buf, unsigned len);
        void (*set)(struct virtio_device *vdev, unsigned offset,
@@ -233,9 +230,6 @@ void virtio_device_ready(struct virtio_device *dev)
 {
        unsigned status = dev->config->get_status(dev);
 
-       if (dev->config->enable_cbs)
-                  dev->config->enable_cbs(dev);
-
        BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
        dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
 }
index 3b1df7da402d60cad6cfb5ac7c701058d60e2686..b159c278996128036f30b2bcbe8a51a3fa6aac3d 100644 (file)
@@ -26,7 +26,7 @@ struct notifier_block;                /* in notifier.h */
 #define VM_KASAN               0x00000080      /* has allocated kasan shadow memory */
 #define VM_FLUSH_RESET_PERMS   0x00000100      /* reset direct map and flush TLB on unmap, can't be freed in atomic context */
 #define VM_MAP_PUT_PAGES       0x00000200      /* put pages and free array in vfree */
-#define VM_NO_HUGE_VMAP                0x00000400      /* force PAGE_SIZE pte mapping */
+#define VM_ALLOW_HUGE_VMAP     0x00000400      /* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */
 
 #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
        !defined(CONFIG_KASAN_VMALLOC)
@@ -153,7 +153,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
                        const void *caller) __alloc_size(1);
 void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
                int node, const void *caller) __alloc_size(1);
-void *vmalloc_no_huge(unsigned long size) __alloc_size(1);
+void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
 
 extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
 extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
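A hedged usage sketch for the renamed interface (alloc_big_table() is
hypothetical): allocations now opt in to huge mappings instead of opting out:

static void *alloc_big_table(void)
{
        /*
         * Opportunistically huge-mapped on architectures with
         * HAVE_ARCH_HUGE_VMALLOC; falls back to base pages elsewhere.
         * Free with vfree() as usual.
         */
        return vmalloc_huge(16 * 1024 * 1024, GFP_KERNEL);
}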
index bb52b786be1b74b65b0ad0a55d2ecd81f44f5351..72feab5ea8d4a94d49ed48c531e2d74e9dbaac1d 100644 (file)
@@ -9,6 +9,7 @@
  * See Documentation/core-api/xarray.rst for how to use the XArray.
  */
 
+#include <linux/bitmap.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/gfp.h>
index 90cd02ff77ef67f7f65e2c53127c4510c23bd4a9..9c5637d41d95168052686caf7b3ff51b517e6b9b 100644 (file)
@@ -4,8 +4,6 @@
 
 #include <linux/skbuff.h>
 
-#define ESP_SKB_FRAG_MAXSIZE (PAGE_SIZE << SKB_FRAG_PAGE_ORDER)
-
 struct ip_esp_hdr;
 
 static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
index aa33e1092e2c4d6fb85334eb0477a0f5559a5807..9f65f1bfbd24646c4d9a37db85360126d038fc42 100644 (file)
@@ -59,6 +59,8 @@ struct flow_dissector_key_vlan {
                __be16  vlan_tci;
        };
        __be16  vlan_tpid;
+       __be16  vlan_eth_type;
+       u16     padding;
 };
 
 struct flow_dissector_mpls_lse {
index a38c4f1e4e5c641dcede4d7fedfcdbfadbac430e..74b369bddf49e82aaa722d412ac407e7f0c29224 100644 (file)
@@ -58,7 +58,7 @@ struct ip6_tnl {
 
        /* These fields used only by GRE */
        __u32 i_seqno;  /* The last seen seqno  */
-       __u32 o_seqno;  /* The last output seqno */
+       atomic_t o_seqno;       /* The last output seqno */
        int hlen;       /* tun_hlen + encap_hlen */
        int tun_hlen;   /* Precalculated header length */
        int encap_hlen; /* Encap header length (FOU,GUE) */
index 0219fe907b261952ec1f140d105cd74d7eda6b40..c24fa934221dde1c59ae6519cee783233d19af48 100644 (file)
@@ -116,7 +116,7 @@ struct ip_tunnel {
 
        /* These four fields used only by GRE */
        u32             i_seqno;        /* The last seen seqno  */
-       u32             o_seqno;        /* The last output seqno */
+       atomic_t        o_seqno;        /* The last output seqno */
        int             tun_hlen;       /* Precalculated header length */
 
        /* These four fields used only by ERSPAN */
@@ -243,11 +243,18 @@ static inline __be32 tunnel_id_to_key32(__be64 tun_id)
 static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
                                       int proto,
                                       __be32 daddr, __be32 saddr,
-                                      __be32 key, __u8 tos, int oif,
+                                      __be32 key, __u8 tos,
+                                      struct net *net, int oif,
                                       __u32 mark, __u32 tun_inner_hash)
 {
        memset(fl4, 0, sizeof(*fl4));
-       fl4->flowi4_oif = oif;
+
+       if (oif) {
+               fl4->flowi4_l3mdev = l3mdev_master_upper_ifindex_by_index_rcu(net, oif);
+               /* Legacy VRF/l3mdev use case */
+               fl4->flowi4_oif = fl4->flowi4_l3mdev ? 0 : oif;
+       }
+
        fl4->daddr = daddr;
        fl4->saddr = saddr;
        fl4->flowi4_tos = tos;
index 3d83b64471d32391fb632e8c25e12a8ec7d1b42e..b4af4837d80b4ed47d05474432d5b8ebb42322e7 100644 (file)
@@ -75,8 +75,8 @@ struct netns_ipv6 {
        struct list_head        fib6_walkers;
        rwlock_t                fib6_walker_lock;
        spinlock_t              fib6_gc_lock;
-       unsigned int             ip6_rt_gc_expire;
-       unsigned long            ip6_rt_last_gc;
+       atomic_t                ip6_rt_gc_expire;
+       unsigned long           ip6_rt_last_gc;
        unsigned char           flowlabel_has_excl;
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
        bool                    fib6_has_custom_rules;
index 70ca4a5e330a2002acffd7ddd4f685a758c7fbc4..cc1295037533a7741e454f7c040f77a21deae02b 100644 (file)
@@ -480,6 +480,7 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
                      u32 cookie);
 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
 struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
+                                           const struct tcp_request_sock_ops *af_ops,
                                            struct sock *sk, struct sk_buff *skb);
 #ifdef CONFIG_SYN_COOKIES
 
@@ -620,6 +621,7 @@ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
 void tcp_reset(struct sock *sk, struct sk_buff *skb);
 void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
 void tcp_fin(struct sock *sk);
+void tcp_check_space(struct sock *sk);
 
 /* tcp_timer.c */
 void tcp_init_xmit_timers(struct sock *);
@@ -1042,6 +1044,7 @@ struct rate_sample {
        int  losses;            /* number of packets marked lost upon ACK */
        u32  acked_sacked;      /* number of packets newly (S)ACKed upon ACK */
        u32  prior_in_flight;   /* in flight before this ACK */
+       u32  last_end_seq;      /* end_seq of most recently ACKed packet */
        bool is_app_limited;    /* is sample from packet with bubble in pipe? */
        bool is_retrans;        /* is sample from retransmission? */
        bool is_ack_delayed;    /* is this (likely) a delayed ACK? */
@@ -1164,6 +1167,11 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
                  bool is_sack_reneg, struct rate_sample *rs);
 void tcp_rate_check_app_limited(struct sock *sk);
 
+static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
+{
+       return t1 > t2 || (t1 == t2 && after(seq1, seq2));
+}
+
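Illustrative values for the helper above: the sequence numbers only break ties
between equal TX timestamps.

/*
 *   tcp_skb_sent_after(100, 100, 2000, 1000) -> true  (same stamp, later seq)
 *   tcp_skb_sent_after(100, 200, 2000, 1000) -> false (t1 older, seq ignored)
 */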
 /* These functions determine how the current flow behaves in respect of SACK
  * handling. SACK is negotiated with the peer, and therefore it can vary
  * between different flows.
index e76c94697c1bc557af1f8351e26d3128fcdd3bfc..d0a24779c52dc44cb14c4fea56f8243e1b0f22a0 100644 (file)
@@ -53,8 +53,10 @@ enum {
 
 #define ISID_SIZE                      6
 
-/* Connection suspend "bit" */
-#define ISCSI_SUSPEND_BIT              1
+/* Connection flags */
+#define ISCSI_CONN_FLAG_SUSPEND_TX     BIT(0)
+#define ISCSI_CONN_FLAG_SUSPEND_RX     BIT(1)
+#define ISCSI_CONN_FLAG_BOUND          BIT(2)
 
 #define ISCSI_ITT_MASK                 0x1fff
 #define ISCSI_TOTAL_CMDS_MAX           4096
@@ -211,8 +213,7 @@ struct iscsi_conn {
        struct list_head        cmdqueue;       /* data-path cmd queue */
        struct list_head        requeue;        /* tasks needing another run */
        struct work_struct      xmitwork;       /* per-conn. xmit workqueue */
-       unsigned long           suspend_tx;     /* suspend Tx */
-       unsigned long           suspend_rx;     /* suspend Rx */
+       unsigned long           flags;          /* ISCSI_CONN_FLAGs */
 
        /* negotiated params */
        unsigned                max_recv_dlength; /* initiator_max_recv_dsl*/
index 38e4a67f5922ce4321050a36da21a0419a312419..9acb8422f68024cee9e70946b012541a460b38aa 100644 (file)
@@ -211,6 +211,8 @@ struct iscsi_cls_conn {
        struct mutex ep_mutex;
        struct iscsi_endpoint *ep;
 
+       /* Used when accessing flags and queueing work. */
+       spinlock_t lock;
        unsigned long flags;
        struct work_struct cleanup_work;
 
@@ -295,7 +297,7 @@ extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
 struct iscsi_endpoint {
        void *dd_data;                  /* LLD private data */
        struct device dev;
-       uint64_t id;
+       int id;
        struct iscsi_cls_conn *conn;
 };
 
index b7e9b58d3c78807271d48abe1719096936a64838..6d4cc49584c6372a127525a9b47710d4bcd64d8b 100644 (file)
@@ -284,6 +284,7 @@ int snd_card_disconnect(struct snd_card *card);
 void snd_card_disconnect_sync(struct snd_card *card);
 int snd_card_free(struct snd_card *card);
 int snd_card_free_when_closed(struct snd_card *card);
+int snd_card_free_on_error(struct device *dev, int ret);
 void snd_card_set_id(struct snd_card *card, const char *id);
 int snd_card_register(struct snd_card *card);
 int snd_card_info_init(void);
index 653dfffb3ac845400c19b9f57243af70aae4437f..8d79cebf95f328357cd83598374d29a85603df9b 100644 (file)
@@ -51,6 +51,11 @@ struct snd_dma_device {
 #define SNDRV_DMA_TYPE_DEV_SG  SNDRV_DMA_TYPE_DEV /* no SG-buf support */
 #define SNDRV_DMA_TYPE_DEV_WC_SG       SNDRV_DMA_TYPE_DEV_WC
 #endif
+/* fallback types, don't use those directly */
+#ifdef CONFIG_SND_DMA_SGBUF
+#define SNDRV_DMA_TYPE_DEV_SG_FALLBACK         10
+#define SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK      11
+#endif
 
 /*
  * info for buffer allocation
index 314f2779cab52e290740e96628264a12f057395e..6b99310b5b8890bd7ceacbae4f347a30c6a0324f 100644 (file)
@@ -402,6 +402,7 @@ struct snd_pcm_runtime {
        struct fasync_struct *fasync;
        bool stop_operating;            /* sync_stop will be called */
        struct mutex buffer_mutex;      /* protect for buffer changes */
+       atomic_t buffer_accessing;      /* >0: in r/w operation, <0: blocked */
 
        /* -- private section -- */
        void *private_data;
index 2c530637e10aec46f588ac62ad266c655f5370fe..311c14a20e7036869df4992131741d23c73b7fcb 100644 (file)
@@ -426,8 +426,8 @@ TRACE_EVENT(cachefiles_vol_coherency,
            );
 
 TRACE_EVENT(cachefiles_prep_read,
-           TP_PROTO(struct netfs_read_subrequest *sreq,
-                    enum netfs_read_source source,
+           TP_PROTO(struct netfs_io_subrequest *sreq,
+                    enum netfs_io_source source,
                     enum cachefiles_prepare_read_trace why,
                     ino_t cache_inode),
 
@@ -437,7 +437,7 @@ TRACE_EVENT(cachefiles_prep_read,
                    __field(unsigned int,               rreq            )
                    __field(unsigned short,             index           )
                    __field(unsigned short,             flags           )
-                   __field(enum netfs_read_source,     source          )
+                   __field(enum netfs_io_source,       source          )
                    __field(enum cachefiles_prepare_read_trace, why     )
                    __field(size_t,                     len             )
                    __field(loff_t,                     start           )
index e6f4ebbb4c69e52f4fc97797a272b198772d2c6a..beec534cbaab25e6a2ce4aacaa09134068f7394a 100644 (file)
 /*
  * Define enums for tracing information.
  */
-#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
-#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
-
-enum netfs_read_trace {
-       netfs_read_trace_expanded,
-       netfs_read_trace_readahead,
-       netfs_read_trace_readpage,
-       netfs_read_trace_write_begin,
-};
-
-enum netfs_rreq_trace {
-       netfs_rreq_trace_assess,
-       netfs_rreq_trace_done,
-       netfs_rreq_trace_free,
-       netfs_rreq_trace_resubmit,
-       netfs_rreq_trace_unlock,
-       netfs_rreq_trace_unmark,
-       netfs_rreq_trace_write,
-};
-
-enum netfs_sreq_trace {
-       netfs_sreq_trace_download_instead,
-       netfs_sreq_trace_free,
-       netfs_sreq_trace_prepare,
-       netfs_sreq_trace_resubmit_short,
-       netfs_sreq_trace_submit,
-       netfs_sreq_trace_terminated,
-       netfs_sreq_trace_write,
-       netfs_sreq_trace_write_skip,
-       netfs_sreq_trace_write_term,
-};
-
-enum netfs_failure {
-       netfs_fail_check_write_begin,
-       netfs_fail_copy_to_cache,
-       netfs_fail_read,
-       netfs_fail_short_readpage,
-       netfs_fail_short_write_begin,
-       netfs_fail_prepare_write,
-};
-
-#endif
-
 #define netfs_read_traces                                      \
        EM(netfs_read_trace_expanded,           "EXPANDED ")    \
        EM(netfs_read_trace_readahead,          "READAHEAD")    \
        EM(netfs_read_trace_readpage,           "READPAGE ")    \
        E_(netfs_read_trace_write_begin,        "WRITEBEGN")
 
+#define netfs_rreq_origins                                     \
+       EM(NETFS_READAHEAD,                     "RA")           \
+       EM(NETFS_READPAGE,                      "RP")           \
+       E_(NETFS_READ_FOR_WRITE,                "RW")
+
 #define netfs_rreq_traces                                      \
-       EM(netfs_rreq_trace_assess,             "ASSESS")       \
-       EM(netfs_rreq_trace_done,               "DONE  ")       \
-       EM(netfs_rreq_trace_free,               "FREE  ")       \
-       EM(netfs_rreq_trace_resubmit,           "RESUBM")       \
-       EM(netfs_rreq_trace_unlock,             "UNLOCK")       \
-       EM(netfs_rreq_trace_unmark,             "UNMARK")       \
-       E_(netfs_rreq_trace_write,              "WRITE ")
+       EM(netfs_rreq_trace_assess,             "ASSESS ")      \
+       EM(netfs_rreq_trace_copy,               "COPY   ")      \
+       EM(netfs_rreq_trace_done,               "DONE   ")      \
+       EM(netfs_rreq_trace_free,               "FREE   ")      \
+       EM(netfs_rreq_trace_resubmit,           "RESUBMT")      \
+       EM(netfs_rreq_trace_unlock,             "UNLOCK ")      \
+       E_(netfs_rreq_trace_unmark,             "UNMARK ")
 
 #define netfs_sreq_sources                                     \
        EM(NETFS_FILL_WITH_ZEROES,              "ZERO")         \
@@ -94,10 +56,47 @@ enum netfs_failure {
        EM(netfs_fail_check_write_begin,        "check-write-begin")    \
        EM(netfs_fail_copy_to_cache,            "copy-to-cache")        \
        EM(netfs_fail_read,                     "read")                 \
-       EM(netfs_fail_short_readpage,           "short-readpage")       \
-       EM(netfs_fail_short_write_begin,        "short-write-begin")    \
+       EM(netfs_fail_short_read,               "short-read")           \
        E_(netfs_fail_prepare_write,            "prep-write")
 
+#define netfs_rreq_ref_traces                                  \
+       EM(netfs_rreq_trace_get_hold,           "GET HOLD   ")  \
+       EM(netfs_rreq_trace_get_subreq,         "GET SUBREQ ")  \
+       EM(netfs_rreq_trace_put_complete,       "PUT COMPLT ")  \
+       EM(netfs_rreq_trace_put_discard,        "PUT DISCARD")  \
+       EM(netfs_rreq_trace_put_failed,         "PUT FAILED ")  \
+       EM(netfs_rreq_trace_put_hold,           "PUT HOLD   ")  \
+       EM(netfs_rreq_trace_put_subreq,         "PUT SUBREQ ")  \
+       EM(netfs_rreq_trace_put_zero_len,       "PUT ZEROLEN")  \
+       E_(netfs_rreq_trace_new,                "NEW        ")
+
+#define netfs_sreq_ref_traces                                  \
+       EM(netfs_sreq_trace_get_copy_to_cache,  "GET COPY2C ")  \
+       EM(netfs_sreq_trace_get_resubmit,       "GET RESUBMIT") \
+       EM(netfs_sreq_trace_get_short_read,     "GET SHORTRD")  \
+       EM(netfs_sreq_trace_new,                "NEW        ")  \
+       EM(netfs_sreq_trace_put_clear,          "PUT CLEAR  ")  \
+       EM(netfs_sreq_trace_put_failed,         "PUT FAILED ")  \
+       EM(netfs_sreq_trace_put_merged,         "PUT MERGED ")  \
+       EM(netfs_sreq_trace_put_no_copy,        "PUT NO COPY")  \
+       E_(netfs_sreq_trace_put_terminated,     "PUT TERM   ")
+
+#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+#undef EM
+#undef E_
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum netfs_read_trace { netfs_read_traces } __mode(byte);
+enum netfs_rreq_trace { netfs_rreq_traces } __mode(byte);
+enum netfs_sreq_trace { netfs_sreq_traces } __mode(byte);
+enum netfs_failure { netfs_failures } __mode(byte);
+enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte);
+enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
+
+#endif
 
 /*
  * Export enum symbols via userspace.
@@ -108,10 +107,13 @@ enum netfs_failure {
 #define E_(a, b) TRACE_DEFINE_ENUM(a);
 
 netfs_read_traces;
+netfs_rreq_origins;
 netfs_rreq_traces;
 netfs_sreq_sources;
 netfs_sreq_traces;
 netfs_failures;
+netfs_rreq_ref_traces;
+netfs_sreq_ref_traces;
 
 /*
  * Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -123,7 +125,7 @@ netfs_failures;
 #define E_(a, b)       { a, b }
 
 TRACE_EVENT(netfs_read,
-           TP_PROTO(struct netfs_read_request *rreq,
+           TP_PROTO(struct netfs_io_request *rreq,
                     loff_t start, size_t len,
                     enum netfs_read_trace what),
 
@@ -156,31 +158,34 @@ TRACE_EVENT(netfs_read,
            );
 
 TRACE_EVENT(netfs_rreq,
-           TP_PROTO(struct netfs_read_request *rreq,
+           TP_PROTO(struct netfs_io_request *rreq,
                     enum netfs_rreq_trace what),
 
            TP_ARGS(rreq, what),
 
            TP_STRUCT__entry(
                    __field(unsigned int,               rreq            )
-                   __field(unsigned short,             flags           )
+                   __field(unsigned int,               flags           )
+                   __field(enum netfs_io_origin,       origin          )
                    __field(enum netfs_rreq_trace,      what            )
                             ),
 
            TP_fast_assign(
                    __entry->rreq       = rreq->debug_id;
                    __entry->flags      = rreq->flags;
+                   __entry->origin     = rreq->origin;
                    __entry->what       = what;
                           ),
 
-           TP_printk("R=%08x %s f=%02x",
+           TP_printk("R=%08x %s %s f=%02x",
                      __entry->rreq,
+                     __print_symbolic(__entry->origin, netfs_rreq_origins),
                      __print_symbolic(__entry->what, netfs_rreq_traces),
                      __entry->flags)
            );
 
 TRACE_EVENT(netfs_sreq,
-           TP_PROTO(struct netfs_read_subrequest *sreq,
+           TP_PROTO(struct netfs_io_subrequest *sreq,
                     enum netfs_sreq_trace what),
 
            TP_ARGS(sreq, what),
@@ -190,7 +195,7 @@ TRACE_EVENT(netfs_sreq,
                    __field(unsigned short,             index           )
                    __field(short,                      error           )
                    __field(unsigned short,             flags           )
-                   __field(enum netfs_read_source,     source          )
+                   __field(enum netfs_io_source,       source          )
                    __field(enum netfs_sreq_trace,      what            )
                    __field(size_t,                     len             )
                    __field(size_t,                     transferred     )
@@ -211,26 +216,26 @@ TRACE_EVENT(netfs_sreq,
 
            TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx/%zx e=%d",
                      __entry->rreq, __entry->index,
-                     __print_symbolic(__entry->what, netfs_sreq_traces),
                      __print_symbolic(__entry->source, netfs_sreq_sources),
+                     __print_symbolic(__entry->what, netfs_sreq_traces),
                      __entry->flags,
                      __entry->start, __entry->transferred, __entry->len,
                      __entry->error)
            );
 
 TRACE_EVENT(netfs_failure,
-           TP_PROTO(struct netfs_read_request *rreq,
-                    struct netfs_read_subrequest *sreq,
+           TP_PROTO(struct netfs_io_request *rreq,
+                    struct netfs_io_subrequest *sreq,
                     int error, enum netfs_failure what),
 
            TP_ARGS(rreq, sreq, error, what),
 
            TP_STRUCT__entry(
                    __field(unsigned int,               rreq            )
-                   __field(unsigned short,             index           )
+                   __field(short,                      index           )
                    __field(short,                      error           )
                    __field(unsigned short,             flags           )
-                   __field(enum netfs_read_source,     source          )
+                   __field(enum netfs_io_source,       source          )
                    __field(enum netfs_failure,         what            )
                    __field(size_t,                     len             )
                    __field(size_t,                     transferred     )
@@ -239,17 +244,17 @@ TRACE_EVENT(netfs_failure,
 
            TP_fast_assign(
                    __entry->rreq       = rreq->debug_id;
-                   __entry->index      = sreq ? sreq->debug_index : 0;
+                   __entry->index      = sreq ? sreq->debug_index : -1;
                    __entry->error      = error;
                    __entry->flags      = sreq ? sreq->flags : 0;
                    __entry->source     = sreq ? sreq->source : NETFS_INVALID_READ;
                    __entry->what       = what;
-                   __entry->len        = sreq ? sreq->len : 0;
+                   __entry->len        = sreq ? sreq->len : rreq->len;
                    __entry->transferred = sreq ? sreq->transferred : 0;
                    __entry->start      = sreq ? sreq->start : 0;
                           ),
 
-           TP_printk("R=%08x[%u] %s f=%02x s=%llx %zx/%zx %s e=%d",
+           TP_printk("R=%08x[%d] %s f=%02x s=%llx %zx/%zx %s e=%d",
                      __entry->rreq, __entry->index,
                      __print_symbolic(__entry->source, netfs_sreq_sources),
                      __entry->flags,
@@ -258,6 +263,59 @@ TRACE_EVENT(netfs_failure,
                      __entry->error)
            );
 
+TRACE_EVENT(netfs_rreq_ref,
+           TP_PROTO(unsigned int rreq_debug_id, int ref,
+                    enum netfs_rreq_ref_trace what),
+
+           TP_ARGS(rreq_debug_id, ref, what),
+
+           TP_STRUCT__entry(
+                   __field(unsigned int,               rreq            )
+                   __field(int,                        ref             )
+                   __field(enum netfs_rreq_ref_trace,  what            )
+                            ),
+
+           TP_fast_assign(
+                   __entry->rreq       = rreq_debug_id;
+                   __entry->ref        = ref;
+                   __entry->what       = what;
+                          ),
+
+           TP_printk("R=%08x %s r=%u",
+                     __entry->rreq,
+                     __print_symbolic(__entry->what, netfs_rreq_ref_traces),
+                     __entry->ref)
+           );
+
+TRACE_EVENT(netfs_sreq_ref,
+           TP_PROTO(unsigned int rreq_debug_id, unsigned int subreq_debug_index,
+                    int ref, enum netfs_sreq_ref_trace what),
+
+           TP_ARGS(rreq_debug_id, subreq_debug_index, ref, what),
+
+           TP_STRUCT__entry(
+                   __field(unsigned int,               rreq            )
+                   __field(unsigned int,               subreq          )
+                   __field(int,                        ref             )
+                   __field(enum netfs_sreq_ref_trace,  what            )
+                            ),
+
+           TP_fast_assign(
+                   __entry->rreq       = rreq_debug_id;
+                   __entry->subreq     = subreq_debug_index;
+                   __entry->ref        = ref;
+                   __entry->what       = what;
+                          ),
+
+           TP_printk("R=%08x[%x] %s r=%u",
+                     __entry->rreq,
+                     __entry->subreq,
+                     __print_symbolic(__entry->what, netfs_sreq_ref_traces),
+                     __entry->ref)
+           );
+
+#undef EM
+#undef E_
 #endif /* _TRACE_NETFS_H */
 
 /* This part must be outside protection */
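A note on the netfs hunks above: they replace the open-coded enum definitions with EM()/E_() X-macro lists, so one list now generates both the enum members (the __mode(byte) declarations inside the once-only guard) and the string tables that __print_symbolic() consumes, and the two can no longer drift apart. A minimal standalone sketch of the technique — every name below is invented for illustration, none of it is kernel code:

    #include <stdio.h>

    /* One list drives both the enum and the name table. */
    #define COLOR_LIST \
            EM(color_red,   "RED  ") \
            EM(color_green, "GREEN") \
            E_(color_blue,  "BLUE ")

    /* Pass 1: expand the list into enum members. */
    #define EM(a, b) a,
    #define E_(a, b) a
    enum color { COLOR_LIST };
    #undef EM
    #undef E_

    /* Pass 2: expand the same list into value/name pairs. */
    #define EM(a, b) { a, b },
    #define E_(a, b) { a, b }
    static const struct { int val; const char *name; } color_names[] = { COLOR_LIST };
    #undef EM
    #undef E_

    int main(void)
    {
            printf("%s\n", color_names[color_green].name); /* prints GREEN */
            return 0;
    }

The trace header does this a third time, redefining EM()/E_() as TRACE_DEFINE_ENUM() and then as { a, b } pairs — which is what the "Now redefine the EM() and E_() macros..." comment above refers to.
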
index 0f34f13ebd55853adbe773640564badcbcf8b5a7..3995c58a1c514ccafdb2049cdc0b25566a8f227a 100644 (file)
@@ -1004,7 +1004,6 @@ DEFINE_RPC_XPRT_LIFETIME_EVENT(connect);
 DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_auto);
 DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_done);
 DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_force);
-DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_cleanup);
 DEFINE_RPC_XPRT_LIFETIME_EVENT(destroy);
 
 DECLARE_EVENT_CLASS(rpc_xprt_event,
@@ -2016,17 +2015,18 @@ DECLARE_EVENT_CLASS(svc_deferred_event,
        TP_STRUCT__entry(
                __field(const void *, dr)
                __field(u32, xid)
-               __string(addr, dr->xprt->xpt_remotebuf)
+               __array(__u8, addr, INET6_ADDRSTRLEN + 10)
        ),
 
        TP_fast_assign(
                __entry->dr = dr;
                __entry->xid = be32_to_cpu(*(__be32 *)(dr->args +
                                                       (dr->xprt_hlen>>2)));
-               __assign_str(addr, dr->xprt->xpt_remotebuf);
+               snprintf(__entry->addr, sizeof(__entry->addr) - 1,
+                        "%pISpc", (struct sockaddr *)&dr->addr);
        ),
 
-       TP_printk("addr=%s dr=%p xid=0x%08x", __get_str(addr), __entry->dr,
+       TP_printk("addr=%s dr=%p xid=0x%08x", __entry->addr, __entry->dr,
                __entry->xid)
 );
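Judging from this change, the deferred request's transport pointer can no longer be trusted by the time the tracepoint fires (svc_revisit() clears dr->xprt), so the __string()/__assign_str() pair that dereferenced it is replaced by a fixed __array() rendered at assign time from the address stored in the deferred request itself. Reduced to its essentials, the pattern is: reserve a fixed-size buffer in the ring entry and format the value when the event is recorded:

    __array(__u8, addr, INET6_ADDRSTRLEN + 10)              /* in TP_STRUCT__entry */

    snprintf(__entry->addr, sizeof(__entry->addr) - 1,      /* in TP_fast_assign */
             "%pISpc", (struct sockaddr *)&dr->addr);
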
 
index b567c72023396b8424849638568bf85bf0b71fa6..6e492dba96bfcbf03039ba51b9becf08ae9c6c10 100644 (file)
@@ -35,7 +35,7 @@
 
 /* Stage 1 creates the structure of the recorded event layout */
 
-#include "stages/stage1_defines.h"
+#include "stages/stage1_struct_define.h"
 
 #undef DECLARE_CUSTOM_EVENT_CLASS
 #define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
@@ -56,7 +56,7 @@
 
 /* Stage 2 creates the custom class */
 
-#include "stages/stage2_defines.h"
+#include "stages/stage2_data_offsets.h"
 
 #undef DECLARE_CUSTOM_EVENT_CLASS
 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
@@ -71,7 +71,7 @@
 
 /* Stage 3 create the way to print the custom event */
 
-#include "stages/stage3_defines.h"
+#include "stages/stage3_trace_output.h"
 
 #undef DECLARE_CUSTOM_EVENT_CLASS
 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -102,7 +102,7 @@ static struct trace_event_functions trace_custom_event_type_funcs_##call = { \
 
 /* Stage 4 creates the offset layout for the fields */
 
-#include "stages/stage4_defines.h"
+#include "stages/stage4_event_fields.h"
 
 #undef DECLARE_CUSTOM_EVENT_CLASS
 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, func, print)    \
@@ -114,7 +114,7 @@ static struct trace_event_fields trace_custom_event_fields_##call[] = {     \
 
 /* Stage 5 creates the helper function for dynamic fields */
 
-#include "stages/stage5_defines.h"
+#include "stages/stage5_get_offsets.h"
 
 #undef DECLARE_CUSTOM_EVENT_CLASS
 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -134,7 +134,7 @@ static inline notrace int trace_custom_event_get_offsets_##call(    \
 
 /* Stage 6 creates the probe function that records the event */
 
-#include "stages/stage6_defines.h"
+#include "stages/stage6_event_callback.h"
 
 #undef DECLARE_CUSTOM_EVENT_CLASS
 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -182,7 +182,7 @@ static inline void ftrace_test_custom_probe_##call(void)            \
 
 /* Stage 7 creates the actual class and event structure for the custom event */
 
-#include "stages/stage7_defines.h"
+#include "stages/stage7_class_define.h"
 
 #undef DECLARE_CUSTOM_EVENT_CLASS
 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
index 8a8cd66cc6d54a036b7485060ddc69d8d32a529a..c2f9cabf154d11d27b7944f7cc14a02812249d5c 100644 (file)
@@ -45,7 +45,7 @@
                             PARAMS(print));                   \
        DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
 
-#include "stages/stage1_defines.h"
+#include "stages/stage1_struct_define.h"
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
  * The size of an array is also encoded, in the higher 16 bits of <item>.
  */
 
-#include "stages/stage2_defines.h"
+#include "stages/stage2_data_offsets.h"
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
  * in binary.
  */
 
-#include "stages/stage3_defines.h"
+#include "stages/stage3_trace_output.h"
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -236,7 +236,7 @@ static struct trace_event_functions trace_event_type_funcs_##call = {       \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#include "stages/stage4_defines.h"
+#include "stages/stage4_event_fields.h"
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)   \
@@ -249,7 +249,7 @@ static struct trace_event_fields trace_event_fields_##call[] = {    \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#include "stages/stage5_defines.h"
+#include "stages/stage5_get_offsets.h"
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -372,7 +372,7 @@ static inline notrace int trace_event_get_offsets_##call(           \
 #define _TRACE_PERF_INIT(call)
 #endif /* CONFIG_PERF_EVENTS */
 
-#include "stages/stage6_defines.h"
+#include "stages/stage6_event_callback.h"
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -418,7 +418,7 @@ static inline void ftrace_test_probe_##call(void)                   \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#include "stages/stage7_defines.h"
+#include "stages/stage7_class_define.h"
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
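Both trace headers walk the same seven stage files, and the renames simply make each file name say what that pass generates — record layout, data offsets, trace output, field descriptions, offset helpers, probe callback, class definition — matching the "Stage N creates ..." comments already present. The underlying technique is repeated expansion: a single event description is re-expanded with different macro definitions per pass. A standalone sketch of the idea (names invented for the example; the kernel re-includes headers rather than passing a macro parameter, but the mechanism is the same):

    #include <stdio.h>

    /* A single field list, expanded differently on each pass. */
    #define EVENT_FIELDS(F) \
            F(int,  pid)    \
            F(long, count)

    /* Pass 1: generate the record layout. */
    #define FIELD(type, name) type name;
    struct event_record { EVENT_FIELDS(FIELD) };
    #undef FIELD

    /* Pass 2: generate a printer from the same list. */
    #define FIELD(type, name) printf(#name "=%ld ", (long)rec->name);
    static void print_record(const struct event_record *rec)
    {
            EVENT_FIELDS(FIELD)
            printf("\n");
    }
    #undef FIELD

    int main(void)
    {
            struct event_record rec = { .pid = 42, .count = 7 };
            print_record(&rec);   /* pid=42 count=7 */
            return 0;
    }
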
index d2be4eb220082b8042b325aa45ac94c17c64bfb9..1845cf7c80bade8458a78843fa2d0d6e9ad4710c 100644 (file)
@@ -201,11 +201,9 @@ struct io_uring_cqe {
  *
  * IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID
  * IORING_CQE_F_MORE   If set, parent SQE will generate more CQE entries
- * IORING_CQE_F_MSG    If set, CQE was generated with IORING_OP_MSG_RING
  */
 #define IORING_CQE_F_BUFFER            (1U << 0)
 #define IORING_CQE_F_MORE              (1U << 1)
-#define IORING_CQE_F_MSG               (1U << 2)
 
 enum {
        IORING_CQE_BUFFER_SHIFT         = 16,
@@ -298,6 +296,7 @@ struct io_uring_params {
 #define IORING_FEAT_NATIVE_WORKERS     (1U << 9)
 #define IORING_FEAT_RSRC_TAGS          (1U << 10)
 #define IORING_FEAT_CQE_SKIP           (1U << 11)
+#define IORING_FEAT_LINKED_FILE                (1U << 12)
 
 /*
  * io_uring_register(2) opcodes and arguments
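Two notes on the io_uring hunks: IORING_CQE_F_MSG is removed before the interface ships, so (judging from the hunk) CQEs posted by IORING_OP_MSG_RING are no longer specially flagged; and the new IORING_FEAT_LINKED_FILE bit is reported through io_uring_params.features at setup time like the other IORING_FEAT_* flags. A minimal probe for the feature bit, using the raw syscall rather than liburing:

    #include <linux/io_uring.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            struct io_uring_params p;

            memset(&p, 0, sizeof(p));
            /* Create a small ring just to read back the feature mask. */
            int fd = syscall(__NR_io_uring_setup, 4, &p);
            if (fd < 0) {
                    perror("io_uring_setup");
                    return 1;
            }
            printf("linked-file feature: %s\n",
                   (p.features & IORING_FEAT_LINKED_FILE) ? "yes" : "no");
            close(fd);
            return 0;
    }
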
index 24a1c45bd1ae2a41b945cc77e7474ba2d6617ee5..98e60801195e29a5e38d7f2b2b3fb40d39d34364 100644 (file)
@@ -45,7 +45,7 @@ struct loop_info {
        unsigned long      lo_inode;            /* ioctl r/o */
        __kernel_old_dev_t lo_rdevice;          /* ioctl r/o */
        int                lo_offset;
-       int                lo_encrypt_type;
+       int                lo_encrypt_type;             /* obsolete, ignored */
        int                lo_encrypt_key_size;         /* ioctl w/o */
        int                lo_flags;
        char               lo_name[LO_NAME_SIZE];
@@ -61,7 +61,7 @@ struct loop_info64 {
        __u64              lo_offset;
        __u64              lo_sizelimit;/* bytes, 0 == max available */
        __u32              lo_number;                   /* ioctl r/o */
-       __u32              lo_encrypt_type;
+       __u32              lo_encrypt_type;             /* obsolete, ignored */
        __u32              lo_encrypt_key_size;         /* ioctl w/o */
        __u32              lo_flags;
        __u8               lo_file_name[LO_NAME_SIZE];
index 03e5b776e59764c69ca95997f153df9111391050..97aca4503a6a390319ca7a53e8b19748912b8b19 100644 (file)
@@ -133,7 +133,8 @@ struct rtc_param {
 #define RTC_FEATURE_UPDATE_INTERRUPT   4
 #define RTC_FEATURE_CORRECTION         5
 #define RTC_FEATURE_BACKUP_SWITCH_MODE 6
-#define RTC_FEATURE_CNT                        7
+#define RTC_FEATURE_ALARM_WAKEUP_ONLY  7
+#define RTC_FEATURE_CNT                        8
 
 /* parameter list */
 #define RTC_PARAM_FEATURES             0
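RTC_FEATURE_ALARM_WAKEUP_ONLY extends the feature bitmap that RTC drivers expose, with RTC_FEATURE_CNT moving up accordingly; judging from the name, a set bit means the alarm can wake the system but is not otherwise usable. Userspace can read the bitmap through the parameter interface named in the context line above. A hedged sketch, assuming an RTC character device that implements RTC_PARAM_GET and headers from this kernel:

    #include <fcntl.h>
    #include <linux/rtc.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            struct rtc_param param = { .param = RTC_PARAM_FEATURES };
            int fd = open("/dev/rtc0", O_RDONLY);

            if (fd < 0) {
                    perror("open /dev/rtc0");
                    return 1;
            }
            if (ioctl(fd, RTC_PARAM_GET, &param) < 0) {
                    perror("RTC_PARAM_GET");
                    close(fd);
                    return 1;
            }
            if (param.uvalue & (1ULL << RTC_FEATURE_ALARM_WAKEUP_ONLY))
                    printf("alarm can wake the system only\n");
            close(fd);
            return 0;
    }
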
index 3021ea25a2849ed351c0c064fdf4a4780a9f7b29..7837ba4fe7289024c10942a7793411e0e3b27e7f 100644 (file)
@@ -1,4 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_STDDEF_H
+#define _UAPI_LINUX_STDDEF_H
+
 #include <linux/compiler_types.h>
 
 #ifndef __always_inline
@@ -41,3 +44,4 @@
                struct { } __empty_ ## NAME; \
                TYPE NAME[]; \
        }
+#endif
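The new _UAPI_LINUX_STDDEF_H guard makes the exported header safe to include more than once; without it, a second inclusion redefines the flexible-array helper whose tail is visible in the context lines above (struct { } __empty_ ## NAME; TYPE NAME[];). That helper exists so a flexible array member can legally sit inside a union or a struct with no other members. A hedged usage sketch, assuming the helper's exported name is __DECLARE_FLEX_ARRAY as in current kernels:

    #include <linux/stddef.h>

    /* A length-prefixed message whose payload can be viewed as bytes or
     * as 32-bit words; a plain "char bytes[];" would be rejected inside
     * a union, the empty-struct wrapper in the helper makes it legal. */
    struct message {
            int len;
            union {
                    __DECLARE_FLEX_ARRAY(char, bytes);
                    __DECLARE_FLEX_ARRAY(int,  words);
            };
    };
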
index c998860d7bbc4351c37c702ea69ea88a814b19cf..5d99e7c242a25e11dc579e059564aec4989b3387 100644 (file)
 /* Get the valid iova range */
 #define VHOST_VDPA_GET_IOVA_RANGE      _IOR(VHOST_VIRTIO, 0x78, \
                                             struct vhost_vdpa_iova_range)
+
+/* Get the config size */
+#define VHOST_VDPA_GET_CONFIG_SIZE     _IOR(VHOST_VIRTIO, 0x79, __u32)
+
+/* Get the count of all virtqueues */
+#define VHOST_VDPA_GET_VQS_COUNT       _IOR(VHOST_VIRTIO, 0x80, __u32)
+
 #endif
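Both new vhost-vdpa ioctls are plain __u32 reads, as the _IOR definitions above show. A hedged userspace sketch (the device node name is system-specific and assumed here):

    #include <fcntl.h>
    #include <linux/types.h>
    #include <linux/vhost.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            __u32 cfg_size = 0, vqs = 0;
            int fd = open("/dev/vhost-vdpa-0", O_RDWR);  /* node name varies */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(fd, VHOST_VDPA_GET_CONFIG_SIZE, &cfg_size) == 0)
                    printf("config space: %u bytes\n", cfg_size);
            if (ioctl(fd, VHOST_VDPA_GET_VQS_COUNT, &vqs) == 0)
                    printf("virtqueues: %u\n", vqs);
            close(fd);
            return 0;
    }
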
index b5eda06f0d57f3997a4a1e8dea6bbd7edbd1987d..f0fb0ae021c096d4ad338275c4d25cccbe9b2e75 100644 (file)
 /* This feature indicates support for the packed virtqueue layout. */
 #define VIRTIO_F_RING_PACKED           34
 
+/*
+ * Inorder feature indicates that all buffers are used by the device
+ * in the same order in which they have been made available.
+ */
+#define VIRTIO_F_IN_ORDER              35
+
 /*
  * This feature indicates that memory accesses by the driver and the
  * device are ordered in a way described by the platform.
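VIRTIO_F_IN_ORDER is only a promise once both sides negotiate it, so a driver wanting to exploit in-order completion must first check the accepted feature set. A trivial hedged sketch of that check:

    #include <stdbool.h>
    #include <stdint.h>

    #define VIRTIO_F_IN_ORDER 35   /* as defined above */

    /* True only if the device accepted the feature during negotiation. */
    static bool virtio_in_order(uint64_t negotiated_features)
    {
            return negotiated_features & (1ULL << VIRTIO_F_IN_ORDER);
    }
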
index a03932f1056523de86ee82a381161f83c17c77da..71a54a6849ca9fa33c4ee3e1984629db8d53ae61 100644 (file)
@@ -37,6 +37,7 @@
 #define VIRTIO_CRYPTO_SERVICE_HASH   1
 #define VIRTIO_CRYPTO_SERVICE_MAC    2
 #define VIRTIO_CRYPTO_SERVICE_AEAD   3
+#define VIRTIO_CRYPTO_SERVICE_AKCIPHER 4
 
 #define VIRTIO_CRYPTO_OPCODE(service, op)   (((service) << 8) | (op))
 
@@ -57,6 +58,10 @@ struct virtio_crypto_ctrl_header {
           VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
 #define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \
           VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
+#define VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION \
+          VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x04)
+#define VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION \
+          VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x05)
        __le32 opcode;
        __le32 algo;
        __le32 flag;
@@ -180,6 +185,58 @@ struct virtio_crypto_aead_create_session_req {
        __u8 padding[32];
 };
 
+struct virtio_crypto_rsa_session_para {
+#define VIRTIO_CRYPTO_RSA_RAW_PADDING   0
+#define VIRTIO_CRYPTO_RSA_PKCS1_PADDING 1
+       __le32 padding_algo;
+
+#define VIRTIO_CRYPTO_RSA_NO_HASH   0
+#define VIRTIO_CRYPTO_RSA_MD2       1
+#define VIRTIO_CRYPTO_RSA_MD3       2
+#define VIRTIO_CRYPTO_RSA_MD4       3
+#define VIRTIO_CRYPTO_RSA_MD5       4
+#define VIRTIO_CRYPTO_RSA_SHA1      5
+#define VIRTIO_CRYPTO_RSA_SHA256    6
+#define VIRTIO_CRYPTO_RSA_SHA384    7
+#define VIRTIO_CRYPTO_RSA_SHA512    8
+#define VIRTIO_CRYPTO_RSA_SHA224    9
+       __le32 hash_algo;
+};
+
+struct virtio_crypto_ecdsa_session_para {
+#define VIRTIO_CRYPTO_CURVE_UNKNOWN   0
+#define VIRTIO_CRYPTO_CURVE_NIST_P192 1
+#define VIRTIO_CRYPTO_CURVE_NIST_P224 2
+#define VIRTIO_CRYPTO_CURVE_NIST_P256 3
+#define VIRTIO_CRYPTO_CURVE_NIST_P384 4
+#define VIRTIO_CRYPTO_CURVE_NIST_P521 5
+       __le32 curve_id;
+       __le32 padding;
+};
+
+struct virtio_crypto_akcipher_session_para {
+#define VIRTIO_CRYPTO_NO_AKCIPHER    0
+#define VIRTIO_CRYPTO_AKCIPHER_RSA   1
+#define VIRTIO_CRYPTO_AKCIPHER_DSA   2
+#define VIRTIO_CRYPTO_AKCIPHER_ECDSA 3
+       __le32 algo;
+
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC  1
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE 2
+       __le32 keytype;
+       __le32 keylen;
+
+       union {
+               struct virtio_crypto_rsa_session_para rsa;
+               struct virtio_crypto_ecdsa_session_para ecdsa;
+       } u;
+};
+
+struct virtio_crypto_akcipher_create_session_req {
+       struct virtio_crypto_akcipher_session_para para;
+       __u8 padding[36];
+};
+
 struct virtio_crypto_alg_chain_session_para {
 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER  1
 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH  2
@@ -247,6 +304,8 @@ struct virtio_crypto_op_ctrl_req {
                        mac_create_session;
                struct virtio_crypto_aead_create_session_req
                        aead_create_session;
+               struct virtio_crypto_akcipher_create_session_req
+                       akcipher_create_session;
                struct virtio_crypto_destroy_session_req
                        destroy_session;
                __u8 padding[56];
@@ -266,6 +325,14 @@ struct virtio_crypto_op_header {
        VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
 #define VIRTIO_CRYPTO_AEAD_DECRYPT \
        VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_ENCRYPT \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x00)
+#define VIRTIO_CRYPTO_AKCIPHER_DECRYPT \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_SIGN \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x02)
+#define VIRTIO_CRYPTO_AKCIPHER_VERIFY \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x03)
        __le32 opcode;
        /* algo should be service-specific algorithms */
        __le32 algo;
@@ -390,6 +457,16 @@ struct virtio_crypto_aead_data_req {
        __u8 padding[32];
 };
 
+struct virtio_crypto_akcipher_para {
+       __le32 src_data_len;
+       __le32 dst_data_len;
+};
+
+struct virtio_crypto_akcipher_data_req {
+       struct virtio_crypto_akcipher_para para;
+       __u8 padding[40];
+};
+
 /* The request of the data virtqueue's packet */
 struct virtio_crypto_op_data_req {
        struct virtio_crypto_op_header header;
@@ -399,6 +476,7 @@ struct virtio_crypto_op_data_req {
                struct virtio_crypto_hash_data_req hash_req;
                struct virtio_crypto_mac_data_req mac_req;
                struct virtio_crypto_aead_data_req aead_req;
+               struct virtio_crypto_akcipher_data_req akcipher_req;
                __u8 padding[48];
        } u;
 };
@@ -408,6 +486,8 @@ struct virtio_crypto_op_data_req {
 #define VIRTIO_CRYPTO_BADMSG    2
 #define VIRTIO_CRYPTO_NOTSUPP   3
 #define VIRTIO_CRYPTO_INVSESS   4 /* Invalid session id */
+#define VIRTIO_CRYPTO_NOSPC     5 /* no free session ID */
+#define VIRTIO_CRYPTO_KEY_REJECTED 6 /* Signature verification failed */
 
 /* The accelerator hardware is ready */
 #define VIRTIO_CRYPTO_S_HW_READY  (1 << 0)
@@ -438,7 +518,7 @@ struct virtio_crypto_config {
        __le32 max_cipher_key_len;
        /* Maximum length of authenticated key */
        __le32 max_auth_key_len;
-       __le32 reserve;
+       __le32 akcipher_algo;
        /* Maximum size of each crypto request's content */
        __le64 max_size;
 };
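The akcipher additions mirror the existing symmetric-session plumbing: a new opcode pair creates and destroys sessions, the session parameters select the algorithm (RSA/DSA/ECDSA), key type, and per-algorithm details, and the config space's former reserve word now advertises the supported akcipher algorithms. A hedged sketch of filling the control request for an RSA signing session — endianness conversions and the actual virtqueue submission are omitted; this only illustrates the structure layout shown above:

    #include <linux/virtio_crypto.h>
    #include <string.h>

    static void init_rsa_sign_session(struct virtio_crypto_op_ctrl_req *req)
    {
            struct virtio_crypto_akcipher_session_para *para =
                    &req->u.akcipher_create_session.para;

            memset(req, 0, sizeof(*req));
            /* Fields are __le32 on the wire; cpu_to_le32() elided for brevity. */
            req->header.opcode = VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION;
            para->algo         = VIRTIO_CRYPTO_AKCIPHER_RSA;
            para->keytype      = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
            para->u.rsa.padding_algo = VIRTIO_CRYPTO_RSA_PKCS1_PADDING;
            para->u.rsa.hash_algo    = VIRTIO_CRYPTO_RSA_SHA256;
            /* para->keylen and the key bytes follow in the request buffer. */
    }
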
index 97463a33baa76bdb6bbe2b006500b70abc6c7036..ddcbefe535e9e784d170bf2dd765dc0ad3e81652 100644 (file)
@@ -62,13 +62,13 @@ config LLD_VERSION
 
 config CC_CAN_LINK
        bool
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag)) if 64BIT
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m32-flag))
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m64-flag)) if 64BIT
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m32-flag))
 
 config CC_CAN_LINK_STATIC
        bool
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag) -static) if 64BIT
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m32-flag) -static)
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m64-flag) -static) if 64BIT
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m32-flag) -static)
 
 config CC_HAS_ASM_GOTO
        def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
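With $(USERCFLAGS) and $(USERLDFLAGS) folded into the cc-can-link.sh probe, the CC_CAN_LINK / CC_CAN_LINK_STATIC results now reflect the exact flags kbuild will apply when compiling userspace programs, so an invocation such as

    make USERLDFLAGS=-static

(an illustrative example; both variables are ordinary kbuild command-line or environment knobs) can no longer produce userprogs builds that diverge from what the configure-time link test concluded.
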
index 8c6de5a9ecc482e05db7cc35efd2fc90f973a95d..c2f1fd95a8214759dcd2e62d416dfa4e73ca3daf 100644 (file)
@@ -133,14 +133,4 @@ config SCHED_CORE
          which is the likely usage by Linux distributions, there should
          be no measurable impact on performance.
 
-config ARCH_WANTS_RT_DELAYED_SIGNALS
-       bool
-       help
-         This option is selected by architectures where raising signals
-         can happen in atomic contexts on PREEMPT_RT enabled kernels. This
-         option delays raising the signal until the return to user space
-         loop where it is also delivered. X86 requires this to deliver
-         signals from trap handlers which run on IST stacks.
-
-config RT_DELAYED_SIGNALS
-       def_bool PREEMPT_RT && ARCH_WANTS_RT_DELAYED_SIGNALS
+
index 471d71935e90a55b825cf3ccbc8e656ac89e2df8..847a82bfe0e3a6a20116775c8684d7cd10e8d67b 100644 (file)
@@ -114,7 +114,8 @@ obj-$(CONFIG_CPU_PM) += cpu_pm.o
 obj-$(CONFIG_BPF) += bpf/
 obj-$(CONFIG_KCSAN) += kcsan/
 obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
-obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call.o
+obj-$(CONFIG_HAVE_STATIC_CALL) += static_call.o
+obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call_inline.o
 obj-$(CONFIG_CFI_CLANG) += cfi.o
 
 obj-$(CONFIG_PERF_EVENTS) += events/
index 5797c2a7a93f415dd622c8aaf34bb98e9e7d7e61..d0a9aa0b42e8d77c79ec9cbf5098736d1c95c3e6 100644 (file)
@@ -71,7 +71,6 @@ struct cpuhp_cpu_state {
        bool                    rollback;
        bool                    single;
        bool                    bringup;
-       int                     cpu;
        struct hlist_node       *node;
        struct hlist_node       *last;
        enum cpuhp_state        cb_state;
@@ -475,7 +474,7 @@ static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
 #endif
 
 static inline enum cpuhp_state
-cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
+cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
 {
        enum cpuhp_state prev_state = st->state;
        bool bringup = st->state < target;
@@ -486,14 +485,15 @@ cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
        st->target = target;
        st->single = false;
        st->bringup = bringup;
-       if (cpu_dying(st->cpu) != !bringup)
-               set_cpu_dying(st->cpu, !bringup);
+       if (cpu_dying(cpu) != !bringup)
+               set_cpu_dying(cpu, !bringup);
 
        return prev_state;
 }
 
 static inline void
-cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
+cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
+                 enum cpuhp_state prev_state)
 {
        bool bringup = !st->bringup;
 
@@ -520,8 +520,8 @@ cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
        }
 
        st->bringup = bringup;
-       if (cpu_dying(st->cpu) != !bringup)
-               set_cpu_dying(st->cpu, !bringup);
+       if (cpu_dying(cpu) != !bringup)
+               set_cpu_dying(cpu, !bringup);
 }
 
 /* Regular hotplug invocation of the AP hotplug thread */
@@ -541,15 +541,16 @@ static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
        wait_for_ap_thread(st, st->bringup);
 }
 
-static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
+static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
+                        enum cpuhp_state target)
 {
        enum cpuhp_state prev_state;
        int ret;
 
-       prev_state = cpuhp_set_state(st, target);
+       prev_state = cpuhp_set_state(cpu, st, target);
        __cpuhp_kick_ap(st);
        if ((ret = st->result)) {
-               cpuhp_reset_state(st, prev_state);
+               cpuhp_reset_state(cpu, st, prev_state);
                __cpuhp_kick_ap(st);
        }
 
@@ -581,7 +582,7 @@ static int bringup_wait_for_ap(unsigned int cpu)
        if (st->target <= CPUHP_AP_ONLINE_IDLE)
                return 0;
 
-       return cpuhp_kick_ap(st, st->target);
+       return cpuhp_kick_ap(cpu, st, st->target);
 }
 
 static int bringup_cpu(unsigned int cpu)
@@ -704,7 +705,7 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                         ret, cpu, cpuhp_get_step(st->state)->name,
                         st->state);
 
-               cpuhp_reset_state(st, prev_state);
+               cpuhp_reset_state(cpu, st, prev_state);
                if (can_rollback_cpu(st))
                        WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
                                                            prev_state));
@@ -721,7 +722,6 @@ static void cpuhp_create(unsigned int cpu)
 
        init_completion(&st->done_up);
        init_completion(&st->done_down);
-       st->cpu = cpu;
 }
 
 static int cpuhp_should_run(unsigned int cpu)
@@ -875,7 +875,7 @@ static int cpuhp_kick_ap_work(unsigned int cpu)
        cpuhp_lock_release(true);
 
        trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
-       ret = cpuhp_kick_ap(st, st->target);
+       ret = cpuhp_kick_ap(cpu, st, st->target);
        trace_cpuhp_exit(cpu, st->state, prev_state, ret);
 
        return ret;
@@ -1107,7 +1107,7 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                         ret, cpu, cpuhp_get_step(st->state)->name,
                         st->state);
 
-               cpuhp_reset_state(st, prev_state);
+               cpuhp_reset_state(cpu, st, prev_state);
 
                if (st->state < prev_state)
                        WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
@@ -1134,7 +1134,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 
        cpuhp_tasks_frozen = tasks_frozen;
 
-       prev_state = cpuhp_set_state(st, target);
+       prev_state = cpuhp_set_state(cpu, st, target);
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread.
@@ -1165,7 +1165,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
        ret = cpuhp_down_callbacks(cpu, st, target);
        if (ret && st->state < prev_state) {
                if (st->state == CPUHP_TEARDOWN_CPU) {
-                       cpuhp_reset_state(st, prev_state);
+                       cpuhp_reset_state(cpu, st, prev_state);
                        __cpuhp_kick_ap(st);
                } else {
                        WARN(1, "DEAD callback error for CPU%d", cpu);
@@ -1352,7 +1352,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 
        cpuhp_tasks_frozen = tasks_frozen;
 
-       cpuhp_set_state(st, target);
+       cpuhp_set_state(cpu, st, target);
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread once more.
index 35a1d29d6a2e99aad04bb7434e62202838377816..9743c6ccce1a92c66b87af1530bd9396136f0ad3 100644 (file)
@@ -277,12 +277,16 @@ void *dma_direct_alloc(struct device *dev, size_t size,
        }
 
        if (remap) {
+               pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
+
+               if (force_dma_unencrypted(dev))
+                       prot = pgprot_decrypted(prot);
+
                /* remove any dirty cache lines on the kernel alias */
                arch_dma_prep_coherent(page, size);
 
                /* create a coherent mapping */
-               ret = dma_common_contiguous_remap(page, size,
-                               dma_pgprot(dev, PAGE_KERNEL, attrs),
+               ret = dma_common_contiguous_remap(page, size, prot,
                                __builtin_return_address(0));
                if (!ret)
                        goto out_free_pages;
@@ -535,6 +539,8 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
        int ret = -ENXIO;
 
        vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
+       if (force_dma_unencrypted(dev))
+               vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 
        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
index 4632b0f4f72eb08c1cea667b9d0e248e5dd1a7b3..8a6cd53dbe8ce11018eb9c05729256576e7d917e 100644 (file)
@@ -114,6 +114,7 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                dma_direct_sync_single_for_cpu(dev, addr, size, dir);
 
        if (unlikely(is_swiotlb_buffer(dev, phys)))
-               swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
+               swiotlb_tbl_unmap_single(dev, phys, size, dir,
+                                        attrs | DMA_ATTR_SKIP_CPU_SYNC);
 }
 #endif /* _KERNEL_DMA_DIRECT_H */
index 559461a826babff64054b6a85119e5edabb9f99d..db7244291b745b8ff42ea6ed6b20c33ec8ec1e84 100644 (file)
@@ -407,8 +407,6 @@ EXPORT_SYMBOL(dma_get_sgtable_attrs);
  */
 pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
 {
-       if (force_dma_unencrypted(dev))
-               prot = pgprot_decrypted(prot);
        if (dev_is_dma_coherent(dev))
                return prot;
 #ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
index ef8d94a98b7ec2993a5be1f2a97e57373741e020..93c3b86e781c143f73092f3e48d7ed073471ce82 100644 (file)
@@ -142,18 +142,6 @@ void noinstr exit_to_user_mode(void)
 /* Workaround to allow gradual conversion of architecture code */
 void __weak arch_do_signal_or_restart(struct pt_regs *regs) { }
 
-#ifdef CONFIG_RT_DELAYED_SIGNALS
-static inline void raise_delayed_signal(void)
-{
-       if (unlikely(current->forced_info.si_signo)) {
-               force_sig_info(&current->forced_info);
-               current->forced_info.si_signo = 0;
-       }
-}
-#else
-static inline void raise_delayed_signal(void) { }
-#endif
-
 static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
                                            unsigned long ti_work)
 {
@@ -168,8 +156,6 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
                if (ti_work & _TIF_NEED_RESCHED)
                        schedule();
 
-               raise_delayed_signal();
-
                if (ti_work & _TIF_UPROBE)
                        uprobe_notify_resume(regs);
 
@@ -406,7 +392,7 @@ DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
 DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
 void dynamic_irqentry_exit_cond_resched(void)
 {
-       if (!static_key_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
+       if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
                return;
        raw_irqentry_exit_cond_resched();
 }
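The one-liner above corrects a misnamed API: there is no static_key_unlikely(); keys declared with DEFINE_STATIC_KEY_TRUE() are tested with the static_branch_*() helpers, which patch the branch site at runtime instead of loading and testing a variable. The surrounding pattern, reduced to a hedged kernel-context sketch (my_feature_key is a made-up name):

    #include <linux/jump_label.h>

    DEFINE_STATIC_KEY_TRUE(my_feature_key);      /* hypothetical key */

    void my_hot_path(void)
    {
            /* With jump-label support this compiles to a patchable
             * NOP/jump rather than a load-and-test. */
            if (!static_branch_unlikely(&my_feature_key))
                    return;
            /* ...work gated by the key runs here... */
    }
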
index cfde994ce61c8a8b6b47ba9d5d9153d1590c8297..23bb19716ad3dc032aa3e22e16904eaea6b817cf 100644 (file)
@@ -574,8 +574,7 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
                              enum event_type_t event_type);
 
 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-                            enum event_type_t event_type,
-                            struct task_struct *task);
+                            enum event_type_t event_type);
 
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
@@ -781,7 +780,6 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx,
 static inline void update_cgrp_time_from_event(struct perf_event *event)
 {
        struct perf_cgroup_info *info;
-       struct perf_cgroup *cgrp;
 
        /*
         * ensure we access cgroup data only when needed and
@@ -790,21 +788,19 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
        if (!is_cgroup_event(event))
                return;
 
-       cgrp = perf_cgroup_from_task(current, event->ctx);
+       info = this_cpu_ptr(event->cgrp->info);
        /*
         * Do not update time when cgroup is not active
         */
-       if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) {
-               info = this_cpu_ptr(event->cgrp->info);
+       if (info->active)
                __update_cgrp_time(info, perf_clock(), true);
-       }
 }
 
 static inline void
-perf_cgroup_set_timestamp(struct task_struct *task,
-                         struct perf_event_context *ctx)
+perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
 {
-       struct perf_cgroup *cgrp;
+       struct perf_event_context *ctx = &cpuctx->ctx;
+       struct perf_cgroup *cgrp = cpuctx->cgrp;
        struct perf_cgroup_info *info;
        struct cgroup_subsys_state *css;
 
@@ -813,10 +809,10 @@ perf_cgroup_set_timestamp(struct task_struct *task,
         * ensure we do not access cgroup data
         * unless we have the cgroup pinned (css_get)
         */
-       if (!task || !ctx->nr_cgroups)
+       if (!cgrp)
                return;
 
-       cgrp = perf_cgroup_from_task(task, ctx);
+       WARN_ON_ONCE(!ctx->nr_cgroups);
 
        for (css = &cgrp->css; css; css = css->parent) {
                cgrp = container_of(css, struct perf_cgroup, css);
@@ -828,17 +824,12 @@ perf_cgroup_set_timestamp(struct task_struct *task,
 
 static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);
 
-#define PERF_CGROUP_SWOUT      0x1 /* cgroup switch out every event */
-#define PERF_CGROUP_SWIN       0x2 /* cgroup switch in events based on task */
-
 /*
  * reschedule events based on the cgroup constraint of task.
- *
- * mode SWOUT : schedule out everything
- * mode SWIN : schedule in based on cgroup for next
  */
-static void perf_cgroup_switch(struct task_struct *task, int mode)
+static void perf_cgroup_switch(struct task_struct *task)
 {
+       struct perf_cgroup *cgrp;
        struct perf_cpu_context *cpuctx, *tmp;
        struct list_head *list;
        unsigned long flags;
@@ -849,35 +840,31 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
         */
        local_irq_save(flags);
 
+       cgrp = perf_cgroup_from_task(task, NULL);
+
        list = this_cpu_ptr(&cgrp_cpuctx_list);
        list_for_each_entry_safe(cpuctx, tmp, list, cgrp_cpuctx_entry) {
                WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
+               if (READ_ONCE(cpuctx->cgrp) == cgrp)
+                       continue;
 
                perf_ctx_lock(cpuctx, cpuctx->task_ctx);
                perf_pmu_disable(cpuctx->ctx.pmu);
 
-               if (mode & PERF_CGROUP_SWOUT) {
-                       cpu_ctx_sched_out(cpuctx, EVENT_ALL);
-                       /*
-                        * must not be done before ctxswout due
-                        * to event_filter_match() in event_sched_out()
-                        */
-                       cpuctx->cgrp = NULL;
-               }
+               cpu_ctx_sched_out(cpuctx, EVENT_ALL);
+               /*
+                * must not be done before ctxswout due
+                * to update_cgrp_time_from_cpuctx() in
+                * ctx_sched_out()
+                */
+               cpuctx->cgrp = cgrp;
+               /*
+                * set cgrp before ctxsw in to allow
+                * perf_cgroup_set_timestamp() in ctx_sched_in()
+                * to not have to pass task around
+                */
+               cpu_ctx_sched_in(cpuctx, EVENT_ALL);
 
-               if (mode & PERF_CGROUP_SWIN) {
-                       WARN_ON_ONCE(cpuctx->cgrp);
-                       /*
-                        * set cgrp before ctxsw in to allow
-                        * event_filter_match() to not have to pass
-                        * task around
-                        * we pass the cpuctx->ctx to perf_cgroup_from_task()
-                        * because cgorup events are only per-cpu
-                        */
-                       cpuctx->cgrp = perf_cgroup_from_task(task,
-                                                            &cpuctx->ctx);
-                       cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
-               }
                perf_pmu_enable(cpuctx->ctx.pmu);
                perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
        }
@@ -885,58 +872,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
        local_irq_restore(flags);
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task,
-                                        struct task_struct *next)
-{
-       struct perf_cgroup *cgrp1;
-       struct perf_cgroup *cgrp2 = NULL;
-
-       rcu_read_lock();
-       /*
-        * we come here when we know perf_cgroup_events > 0
-        * we do not need to pass the ctx here because we know
-        * we are holding the rcu lock
-        */
-       cgrp1 = perf_cgroup_from_task(task, NULL);
-       cgrp2 = perf_cgroup_from_task(next, NULL);
-
-       /*
-        * only schedule out current cgroup events if we know
-        * that we are switching to a different cgroup. Otherwise,
-        * do no touch the cgroup events.
-        */
-       if (cgrp1 != cgrp2)
-               perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
-
-       rcu_read_unlock();
-}
-
-static inline void perf_cgroup_sched_in(struct task_struct *prev,
-                                       struct task_struct *task)
-{
-       struct perf_cgroup *cgrp1;
-       struct perf_cgroup *cgrp2 = NULL;
-
-       rcu_read_lock();
-       /*
-        * we come here when we know perf_cgroup_events > 0
-        * we do not need to pass the ctx here because we know
-        * we are holding the rcu lock
-        */
-       cgrp1 = perf_cgroup_from_task(task, NULL);
-       cgrp2 = perf_cgroup_from_task(prev, NULL);
-
-       /*
-        * only need to schedule in cgroup events if we are changing
-        * cgroup during ctxsw. Cgroup events were not scheduled
-        * out of ctxsw out if that was not the case.
-        */
-       if (cgrp1 != cgrp2)
-               perf_cgroup_switch(task, PERF_CGROUP_SWIN);
-
-       rcu_read_unlock();
-}
-
 static int perf_cgroup_ensure_storage(struct perf_event *event,
                                struct cgroup_subsys_state *css)
 {
@@ -1032,22 +967,10 @@ perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ct
         */
        cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
 
-       /*
-        * Since setting cpuctx->cgrp is conditional on the current @cgrp
-        * matching the event's cgroup, we must do this for every new event,
-        * because if the first would mismatch, the second would not try again
-        * and we would leave cpuctx->cgrp unset.
-        */
-       if (ctx->is_active && !cpuctx->cgrp) {
-               struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
-
-               if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
-                       cpuctx->cgrp = cgrp;
-       }
-
        if (ctx->nr_cgroups++)
                return;
 
+       cpuctx->cgrp = perf_cgroup_from_task(current, ctx);
        list_add(&cpuctx->cgrp_cpuctx_entry,
                        per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
 }
@@ -1069,9 +992,7 @@ perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *c
        if (--ctx->nr_cgroups)
                return;
 
-       if (ctx->is_active && cpuctx->cgrp)
-               cpuctx->cgrp = NULL;
-
+       cpuctx->cgrp = NULL;
        list_del(&cpuctx->cgrp_cpuctx_entry);
 }
 
@@ -1100,16 +1021,6 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx,
 {
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task,
-                                        struct task_struct *next)
-{
-}
-
-static inline void perf_cgroup_sched_in(struct task_struct *prev,
-                                       struct task_struct *task)
-{
-}
-
 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
                                      struct perf_event_attr *attr,
                                      struct perf_event *group_leader)
@@ -1118,13 +1029,7 @@ static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
 }
 
 static inline void
-perf_cgroup_set_timestamp(struct task_struct *task,
-                         struct perf_event_context *ctx)
-{
-}
-
-static inline void
-perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
+perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
 {
 }
 
@@ -1147,6 +1052,10 @@ static inline void
 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
 {
 }
+
+static void perf_cgroup_switch(struct task_struct *task)
+{
+}
 #endif
 
 /*
@@ -2713,8 +2622,7 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 static void
 ctx_sched_in(struct perf_event_context *ctx,
             struct perf_cpu_context *cpuctx,
-            enum event_type_t event_type,
-            struct task_struct *task);
+            enum event_type_t event_type);
 
 static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
                               struct perf_event_context *ctx,
@@ -2730,15 +2638,14 @@ static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
 }
 
 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
-                               struct perf_event_context *ctx,
-                               struct task_struct *task)
+                               struct perf_event_context *ctx)
 {
-       cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
+       cpu_ctx_sched_in(cpuctx, EVENT_PINNED);
        if (ctx)
-               ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
-       cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
+               ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
+       cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
-               ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+               ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
 }
 
 /*
@@ -2788,7 +2695,7 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
        else if (ctx_event_type & EVENT_PINNED)
                cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-       perf_event_sched_in(cpuctx, task_ctx, current);
+       perf_event_sched_in(cpuctx, task_ctx);
        perf_pmu_enable(cpuctx->ctx.pmu);
 }
 
@@ -3011,7 +2918,7 @@ static void __perf_event_enable(struct perf_event *event,
                return;
 
        if (!event_filter_match(event)) {
-               ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+               ctx_sched_in(ctx, cpuctx, EVENT_TIME);
                return;
        }
 
@@ -3020,7 +2927,7 @@ static void __perf_event_enable(struct perf_event *event,
         * then don't put it on unless the group is on.
         */
        if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
-               ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+               ctx_sched_in(ctx, cpuctx, EVENT_TIME);
                return;
        }
 
@@ -3668,7 +3575,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
         * cgroup event are system-wide mode only
         */
        if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-               perf_cgroup_sched_out(task, next);
+               perf_cgroup_switch(next);
 }
 
 /*
@@ -3865,8 +3772,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 static void
 ctx_sched_in(struct perf_event_context *ctx,
             struct perf_cpu_context *cpuctx,
-            enum event_type_t event_type,
-            struct task_struct *task)
+            enum event_type_t event_type)
 {
        int is_active = ctx->is_active;
 
@@ -3878,7 +3784,7 @@ ctx_sched_in(struct perf_event_context *ctx,
        if (is_active ^ EVENT_TIME) {
                /* start ctx time */
                __update_context_time(ctx, false);
-               perf_cgroup_set_timestamp(task, ctx);
+               perf_cgroup_set_timestamp(cpuctx);
                /*
                 * CPU-release for the below ->is_active store,
                 * see __load_acquire() in perf_event_time_now()
@@ -3909,12 +3815,11 @@ ctx_sched_in(struct perf_event_context *ctx,
 }
 
 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-                            enum event_type_t event_type,
-                            struct task_struct *task)
+                            enum event_type_t event_type)
 {
        struct perf_event_context *ctx = &cpuctx->ctx;
 
-       ctx_sched_in(ctx, cpuctx, event_type, task);
+       ctx_sched_in(ctx, cpuctx, event_type);
 }
 
 static void perf_event_context_sched_in(struct perf_event_context *ctx,
@@ -3956,7 +3861,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
         */
        if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
                cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-       perf_event_sched_in(cpuctx, ctx, task);
+       perf_event_sched_in(cpuctx, ctx);
 
        if (cpuctx->sched_cb_usage && pmu->sched_task)
                pmu->sched_task(cpuctx->task_ctx, true);
@@ -3984,16 +3889,6 @@ void __perf_event_task_sched_in(struct task_struct *prev,
        struct perf_event_context *ctx;
        int ctxn;
 
-       /*
-        * If cgroup events exist on this CPU, then we need to check if we have
-        * to switch in PMU state; cgroup event are system-wide mode only.
-        *
-        * Since cgroup events are CPU events, we must schedule these in before
-        * we schedule in the task events.
-        */
-       if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-               perf_cgroup_sched_in(prev, task);
-
        for_each_task_context_nr(ctxn) {
                ctx = task->perf_event_ctxp[ctxn];
                if (likely(!ctx))
@@ -4267,7 +4162,7 @@ static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
        if (cpu_event)
                rotate_ctx(&cpuctx->ctx, cpu_event);
 
-       perf_event_sched_in(cpuctx, task_ctx, current);
+       perf_event_sched_in(cpuctx, task_ctx);
 
        perf_pmu_enable(cpuctx->ctx.pmu);
        perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
@@ -4339,7 +4234,7 @@ static void perf_event_enable_on_exec(int ctxn)
                clone_ctx = unclone_ctx(ctx);
                ctx_resched(cpuctx, ctx, event_type);
        } else {
-               ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+               ctx_sched_in(ctx, cpuctx, EVENT_TIME);
        }
        perf_ctx_unlock(cpuctx, ctx);
 
@@ -11635,6 +11530,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
        event->state            = PERF_EVENT_STATE_INACTIVE;
 
+       if (parent_event)
+               event->event_caps = parent_event->event_caps;
+
        if (event->attr.sigtrap)
                atomic_set(&event->event_limit, 1);
 
@@ -13562,7 +13460,7 @@ static int __perf_cgroup_move(void *info)
 {
        struct task_struct *task = info;
        rcu_read_lock();
-       perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
+       perf_cgroup_switch(task);
        rcu_read_unlock();
        return 0;
 }
index f7ff8919dc9bbb1ab2a907c1b28a5605dc894251..fdf170404650f7e96559cc2df2d783618e5f93bc 100644 (file)
@@ -269,8 +269,9 @@ static int __irq_build_affinity_masks(unsigned int startvec,
         */
        if (numvecs <= nodes) {
                for_each_node_mask(n, nodemsk) {
-                       cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
-                                  node_to_cpumask[n]);
+                       /* Ensure that only CPUs which are in both masks are set */
+                       cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+                       cpumask_or(&masks[curvec].mask, &masks[curvec].mask, nmsk);
                        if (++curvec == last_affv)
                                curvec = firstvec;
                }
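The fix intersects each node's CPUs with the mask actually being spread before accumulating them into the vector's mask; previously the whole node mask was OR'ed in, so a vector could end up pointing at CPUs outside the requested set. A toy model of the corrected arithmetic with CPUs as bits (illustrative only, not kernel cpumask code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Node 1 holds CPUs 4-7 (0xF0), but only CPUs 4-5 (0x30)
             * are in the mask being spread across vectors. */
            uint64_t node_cpus = 0xF0, spread_mask = 0x30, vec = 0;

            vec |= node_cpus & spread_mask;   /* intersect, then accumulate */
            printf("vector mask: %#llx\n", (unsigned long long)vec); /* 0x30 */
            return 0;
    }
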
index f7df715ec28e6de930c7b00d7f2789801761c07a..7afa40fe5cc43ed51bd78ebc4f049685428425dd 100644 (file)
@@ -137,7 +137,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
        if (!irq_work_claim(work))
                return false;
 
-       kasan_record_aux_stack(work);
+       kasan_record_aux_stack_noalloc(work);
 
        preempt_disable();
        if (cpu != smp_processor_id()) {
index d575b491492593d98a098e2e5486ddafed6cd86c..51efaabac3e4303c536e88f9de5df8eae2f72eeb 100644 (file)
@@ -5752,6 +5752,8 @@ static inline struct task_struct *pick_task(struct rq *rq)
 
 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
 
+static void queue_core_balance(struct rq *rq);
+
 static struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
@@ -5801,7 +5803,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                }
 
                rq->core_pick = NULL;
-               return next;
+               goto out;
        }
 
        put_prev_task_balance(rq, prev, rf);
@@ -5851,7 +5853,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                         */
                        WARN_ON_ONCE(fi_before);
                        task_vruntime_update(rq, next, false);
-                       goto done;
+                       goto out_set_next;
                }
        }
 
@@ -5970,8 +5972,12 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                resched_curr(rq_i);
        }
 
-done:
+out_set_next:
        set_next_task(rq, next);
+out:
+       if (rq->core->core_forceidle_count && next == rq->idle)
+               queue_core_balance(rq);
+
        return next;
 }
 
@@ -6000,7 +6006,7 @@ static bool try_steal_cookie(int this, int that)
                if (p == src->core_pick || p == src->curr)
                        goto next;
 
-               if (!cpumask_test_cpu(this, &p->cpus_mask))
+               if (!is_cpu_allowed(p, this))
                        goto next;
 
                if (p->core_occupation > dst->idle->core_occupation)
@@ -6066,7 +6072,7 @@ static void sched_core_balance(struct rq *rq)
 
 static DEFINE_PER_CPU(struct callback_head, core_balance_head);
 
-void queue_core_balance(struct rq *rq)
+static void queue_core_balance(struct rq *rq)
 {
        if (!sched_core_enabled(rq))
                return;
index 8f8b5020e76af237e4f15ee5db780c050dd96744..ecb0d705287753f080d347bcd3ae1961a5292f61 100644
@@ -434,7 +434,6 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir
 {
        update_idle_core(rq);
        schedstat_inc(rq->sched_goidle);
-       queue_core_balance(rq);
 }
 
 #ifdef CONFIG_SMP
index 58263f90c5598b7c658b007285dbce60726bc78a..8dccb34eb1908b07379284d9708c37274cca2cec 100644
@@ -1232,8 +1232,6 @@ static inline bool sched_group_cookie_match(struct rq *rq,
        return false;
 }
 
-extern void queue_core_balance(struct rq *rq);
-
 static inline bool sched_core_enqueued(struct task_struct *p)
 {
        return !RB_EMPTY_NODE(&p->core_node);
@@ -1267,10 +1265,6 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
        return &rq->__lock;
 }
 
-static inline void queue_core_balance(struct rq *rq)
-{
-}
-
 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
 {
        return true;
index 368a34c25bbf10db57e3c23effdaa314848d8bf2..30cd1ca43bcd5be968a4abd5f8090e3ce9e06f99 100644
@@ -1307,43 +1307,6 @@ enum sig_handler {
        HANDLER_EXIT,    /* Only visible as the process exit code */
 };
 
-/*
- * On some archictectures, PREEMPT_RT has to delay sending a signal from a
- * trap since it cannot enable preemption, and the signal code's
- * spin_locks turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME
- * which will send the signal on exit of the trap.
- */
-#ifdef CONFIG_RT_DELAYED_SIGNALS
-static inline bool force_sig_delayed(struct kernel_siginfo *info,
-                                    struct task_struct *t)
-{
-       if (!in_atomic())
-               return false;
-
-       if (WARN_ON_ONCE(t->forced_info.si_signo))
-               return true;
-
-       if (is_si_special(info)) {
-               WARN_ON_ONCE(info != SEND_SIG_PRIV);
-               t->forced_info.si_signo = info->si_signo;
-               t->forced_info.si_errno = 0;
-               t->forced_info.si_code = SI_KERNEL;
-               t->forced_info.si_pid = 0;
-               t->forced_info.si_uid = 0;
-       } else {
-               t->forced_info = *info;
-       }
-       set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
-       return true;
-}
-#else
-static inline bool force_sig_delayed(struct kernel_siginfo *info,
-                                    struct task_struct *t)
-{
-       return false;
-}
-#endif
-
 /*
  * Force a signal that the process can't ignore: if necessary
  * we unblock the signal and change any SIG_IGN to SIG_DFL.
@@ -1364,9 +1327,6 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
        struct k_sigaction *action;
        int sig = info->si_signo;
 
-       if (force_sig_delayed(info, t))
-               return 0;
-
        spin_lock_irqsave(&t->sighand->siglock, flags);
        action = &t->sighand->action[sig-1];
        ignored = action->sa.sa_handler == SIG_IGN;
index 01a7c1706a58b1df8724f8afc49b4a3a46919bcd..65a630f62363c2a04588e0f7bcf328548b820ca1 100644
@@ -579,7 +579,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 
        /* There shouldn't be any pending callbacks on an offline CPU. */
        if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
-                    !warned && !llist_empty(head))) {
+                    !warned && entry != NULL)) {
                warned = true;
                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
 
index f2b8baea35d2ef4b75342083dcb44254c738f454..e9c3e69f383792464c01b2aec9e63967829537f9 100644
@@ -1,549 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
-#include <linux/init.h>
 #include <linux/static_call.h>
-#include <linux/bug.h>
-#include <linux/smp.h>
-#include <linux/sort.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/cpu.h>
-#include <linux/processor.h>
-#include <asm/sections.h>
-
-extern struct static_call_site __start_static_call_sites[],
-                              __stop_static_call_sites[];
-extern struct static_call_tramp_key __start_static_call_tramp_key[],
-                                   __stop_static_call_tramp_key[];
-
-static bool static_call_initialized;
-
-/* mutex to protect key modules/sites */
-static DEFINE_MUTEX(static_call_mutex);
-
-static void static_call_lock(void)
-{
-       mutex_lock(&static_call_mutex);
-}
-
-static void static_call_unlock(void)
-{
-       mutex_unlock(&static_call_mutex);
-}
-
-static inline void *static_call_addr(struct static_call_site *site)
-{
-       return (void *)((long)site->addr + (long)&site->addr);
-}
-
-static inline unsigned long __static_call_key(const struct static_call_site *site)
-{
-       return (long)site->key + (long)&site->key;
-}
-
-static inline struct static_call_key *static_call_key(const struct static_call_site *site)
-{
-       return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS);
-}
-
-/* These assume the key is word-aligned. */
-static inline bool static_call_is_init(struct static_call_site *site)
-{
-       return __static_call_key(site) & STATIC_CALL_SITE_INIT;
-}
-
-static inline bool static_call_is_tail(struct static_call_site *site)
-{
-       return __static_call_key(site) & STATIC_CALL_SITE_TAIL;
-}
-
-static inline void static_call_set_init(struct static_call_site *site)
-{
-       site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) -
-                   (long)&site->key;
-}
-
-static int static_call_site_cmp(const void *_a, const void *_b)
-{
-       const struct static_call_site *a = _a;
-       const struct static_call_site *b = _b;
-       const struct static_call_key *key_a = static_call_key(a);
-       const struct static_call_key *key_b = static_call_key(b);
-
-       if (key_a < key_b)
-               return -1;
-
-       if (key_a > key_b)
-               return 1;
-
-       return 0;
-}
-
-static void static_call_site_swap(void *_a, void *_b, int size)
-{
-       long delta = (unsigned long)_a - (unsigned long)_b;
-       struct static_call_site *a = _a;
-       struct static_call_site *b = _b;
-       struct static_call_site tmp = *a;
-
-       a->addr = b->addr  - delta;
-       a->key  = b->key   - delta;
-
-       b->addr = tmp.addr + delta;
-       b->key  = tmp.key  + delta;
-}
-
-static inline void static_call_sort_entries(struct static_call_site *start,
-                                           struct static_call_site *stop)
-{
-       sort(start, stop - start, sizeof(struct static_call_site),
-            static_call_site_cmp, static_call_site_swap);
-}
-
-static inline bool static_call_key_has_mods(struct static_call_key *key)
-{
-       return !(key->type & 1);
-}
-
-static inline struct static_call_mod *static_call_key_next(struct static_call_key *key)
-{
-       if (!static_call_key_has_mods(key))
-               return NULL;
-
-       return key->mods;
-}
-
-static inline struct static_call_site *static_call_key_sites(struct static_call_key *key)
-{
-       if (static_call_key_has_mods(key))
-               return NULL;
-
-       return (struct static_call_site *)(key->type & ~1);
-}
-
-void __static_call_update(struct static_call_key *key, void *tramp, void *func)
-{
-       struct static_call_site *site, *stop;
-       struct static_call_mod *site_mod, first;
-
-       cpus_read_lock();
-       static_call_lock();
-
-       if (key->func == func)
-               goto done;
-
-       key->func = func;
-
-       arch_static_call_transform(NULL, tramp, func, false);
-
-       /*
-        * If uninitialized, we'll not update the callsites, but they still
-        * point to the trampoline and we just patched that.
-        */
-       if (WARN_ON_ONCE(!static_call_initialized))
-               goto done;
-
-       first = (struct static_call_mod){
-               .next = static_call_key_next(key),
-               .mod = NULL,
-               .sites = static_call_key_sites(key),
-       };
-
-       for (site_mod = &first; site_mod; site_mod = site_mod->next) {
-               bool init = system_state < SYSTEM_RUNNING;
-               struct module *mod = site_mod->mod;
-
-               if (!site_mod->sites) {
-                       /*
-                        * This can happen if the static call key is defined in
-                        * a module which doesn't use it.
-                        *
-                        * It also happens in the has_mods case, where the
-                        * 'first' entry has no sites associated with it.
-                        */
-                       continue;
-               }
-
-               stop = __stop_static_call_sites;
-
-               if (mod) {
-#ifdef CONFIG_MODULES
-                       stop = mod->static_call_sites +
-                              mod->num_static_call_sites;
-                       init = mod->state == MODULE_STATE_COMING;
-#endif
-               }
-
-               for (site = site_mod->sites;
-                    site < stop && static_call_key(site) == key; site++) {
-                       void *site_addr = static_call_addr(site);
-
-                       if (!init && static_call_is_init(site))
-                               continue;
-
-                       if (!kernel_text_address((unsigned long)site_addr)) {
-                               /*
-                                * This skips patching built-in __exit, which
-                                * is part of init_section_contains() but is
-                                * not part of kernel_text_address().
-                                *
-                                * Skipping built-in __exit is fine since it
-                                * will never be executed.
-                                */
-                               WARN_ONCE(!static_call_is_init(site),
-                                         "can't patch static call site at %pS",
-                                         site_addr);
-                               continue;
-                       }
-
-                       arch_static_call_transform(site_addr, NULL, func,
-                                                  static_call_is_tail(site));
-               }
-       }
-
-done:
-       static_call_unlock();
-       cpus_read_unlock();
-}
-EXPORT_SYMBOL_GPL(__static_call_update);
-
-static int __static_call_init(struct module *mod,
-                             struct static_call_site *start,
-                             struct static_call_site *stop)
-{
-       struct static_call_site *site;
-       struct static_call_key *key, *prev_key = NULL;
-       struct static_call_mod *site_mod;
-
-       if (start == stop)
-               return 0;
-
-       static_call_sort_entries(start, stop);
-
-       for (site = start; site < stop; site++) {
-               void *site_addr = static_call_addr(site);
-
-               if ((mod && within_module_init((unsigned long)site_addr, mod)) ||
-                   (!mod && init_section_contains(site_addr, 1)))
-                       static_call_set_init(site);
-
-               key = static_call_key(site);
-               if (key != prev_key) {
-                       prev_key = key;
-
-                       /*
-                        * For vmlinux (!mod) avoid the allocation by storing
-                        * the sites pointer in the key itself. Also see
-                        * __static_call_update()'s @first.
-                        *
-                        * This allows architectures (eg. x86) to call
-                        * static_call_init() before memory allocation works.
-                        */
-                       if (!mod) {
-                               key->sites = site;
-                               key->type |= 1;
-                               goto do_transform;
-                       }
-
-                       site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
-                       if (!site_mod)
-                               return -ENOMEM;
-
-                       /*
-                        * When the key has a direct sites pointer, extract
-                        * that into an explicit struct static_call_mod, so we
-                        * can have a list of modules.
-                        */
-                       if (static_call_key_sites(key)) {
-                               site_mod->mod = NULL;
-                               site_mod->next = NULL;
-                               site_mod->sites = static_call_key_sites(key);
-
-                               key->mods = site_mod;
-
-                               site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
-                               if (!site_mod)
-                                       return -ENOMEM;
-                       }
-
-                       site_mod->mod = mod;
-                       site_mod->sites = site;
-                       site_mod->next = static_call_key_next(key);
-                       key->mods = site_mod;
-               }
-
-do_transform:
-               arch_static_call_transform(site_addr, NULL, key->func,
-                               static_call_is_tail(site));
-       }
-
-       return 0;
-}
-
-static int addr_conflict(struct static_call_site *site, void *start, void *end)
-{
-       unsigned long addr = (unsigned long)static_call_addr(site);
-
-       if (addr <= (unsigned long)end &&
-           addr + CALL_INSN_SIZE > (unsigned long)start)
-               return 1;
-
-       return 0;
-}
-
-static int __static_call_text_reserved(struct static_call_site *iter_start,
-                                      struct static_call_site *iter_stop,
-                                      void *start, void *end, bool init)
-{
-       struct static_call_site *iter = iter_start;
-
-       while (iter < iter_stop) {
-               if (init || !static_call_is_init(iter)) {
-                       if (addr_conflict(iter, start, end))
-                               return 1;
-               }
-               iter++;
-       }
-
-       return 0;
-}
-
-#ifdef CONFIG_MODULES
-
-static int __static_call_mod_text_reserved(void *start, void *end)
-{
-       struct module *mod;
-       int ret;
-
-       preempt_disable();
-       mod = __module_text_address((unsigned long)start);
-       WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
-       if (!try_module_get(mod))
-               mod = NULL;
-       preempt_enable();
-
-       if (!mod)
-               return 0;
-
-       ret = __static_call_text_reserved(mod->static_call_sites,
-                       mod->static_call_sites + mod->num_static_call_sites,
-                       start, end, mod->state == MODULE_STATE_COMING);
-
-       module_put(mod);
-
-       return ret;
-}
-
-static unsigned long tramp_key_lookup(unsigned long addr)
-{
-       struct static_call_tramp_key *start = __start_static_call_tramp_key;
-       struct static_call_tramp_key *stop = __stop_static_call_tramp_key;
-       struct static_call_tramp_key *tramp_key;
-
-       for (tramp_key = start; tramp_key != stop; tramp_key++) {
-               unsigned long tramp;
-
-               tramp = (long)tramp_key->tramp + (long)&tramp_key->tramp;
-               if (tramp == addr)
-                       return (long)tramp_key->key + (long)&tramp_key->key;
-       }
-
-       return 0;
-}
-
-static int static_call_add_module(struct module *mod)
-{
-       struct static_call_site *start = mod->static_call_sites;
-       struct static_call_site *stop = start + mod->num_static_call_sites;
-       struct static_call_site *site;
-
-       for (site = start; site != stop; site++) {
-               unsigned long s_key = __static_call_key(site);
-               unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
-               unsigned long key;
-
-               /*
-                * Is the key is exported, 'addr' points to the key, which
-                * means modules are allowed to call static_call_update() on
-                * it.
-                *
-                * Otherwise, the key isn't exported, and 'addr' points to the
-                * trampoline so we need to lookup the key.
-                *
-                * We go through this dance to prevent crazy modules from
-                * abusing sensitive static calls.
-                */
-               if (!kernel_text_address(addr))
-                       continue;
-
-               key = tramp_key_lookup(addr);
-               if (!key) {
-                       pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n",
-                               static_call_addr(site));
-                       return -EINVAL;
-               }
-
-               key |= s_key & STATIC_CALL_SITE_FLAGS;
-               site->key = key - (long)&site->key;
-       }
-
-       return __static_call_init(mod, start, stop);
-}
-
-static void static_call_del_module(struct module *mod)
-{
-       struct static_call_site *start = mod->static_call_sites;
-       struct static_call_site *stop = mod->static_call_sites +
-                                       mod->num_static_call_sites;
-       struct static_call_key *key, *prev_key = NULL;
-       struct static_call_mod *site_mod, **prev;
-       struct static_call_site *site;
-
-       for (site = start; site < stop; site++) {
-               key = static_call_key(site);
-               if (key == prev_key)
-                       continue;
-
-               prev_key = key;
-
-               for (prev = &key->mods, site_mod = key->mods;
-                    site_mod && site_mod->mod != mod;
-                    prev = &site_mod->next, site_mod = site_mod->next)
-                       ;
-
-               if (!site_mod)
-                       continue;
-
-               *prev = site_mod->next;
-               kfree(site_mod);
-       }
-}
-
-static int static_call_module_notify(struct notifier_block *nb,
-                                    unsigned long val, void *data)
-{
-       struct module *mod = data;
-       int ret = 0;
-
-       cpus_read_lock();
-       static_call_lock();
-
-       switch (val) {
-       case MODULE_STATE_COMING:
-               ret = static_call_add_module(mod);
-               if (ret) {
-                       WARN(1, "Failed to allocate memory for static calls");
-                       static_call_del_module(mod);
-               }
-               break;
-       case MODULE_STATE_GOING:
-               static_call_del_module(mod);
-               break;
-       }
-
-       static_call_unlock();
-       cpus_read_unlock();
-
-       return notifier_from_errno(ret);
-}
-
-static struct notifier_block static_call_module_nb = {
-       .notifier_call = static_call_module_notify,
-};
-
-#else
-
-static inline int __static_call_mod_text_reserved(void *start, void *end)
-{
-       return 0;
-}
-
-#endif /* CONFIG_MODULES */
-
-int static_call_text_reserved(void *start, void *end)
-{
-       bool init = system_state < SYSTEM_RUNNING;
-       int ret = __static_call_text_reserved(__start_static_call_sites,
-                       __stop_static_call_sites, start, end, init);
-
-       if (ret)
-               return ret;
-
-       return __static_call_mod_text_reserved(start, end);
-}
-
-int __init static_call_init(void)
-{
-       int ret;
-
-       if (static_call_initialized)
-               return 0;
-
-       cpus_read_lock();
-       static_call_lock();
-       ret = __static_call_init(NULL, __start_static_call_sites,
-                                __stop_static_call_sites);
-       static_call_unlock();
-       cpus_read_unlock();
-
-       if (ret) {
-               pr_err("Failed to allocate memory for static_call!\n");
-               BUG();
-       }
-
-       static_call_initialized = true;
-
-#ifdef CONFIG_MODULES
-       register_module_notifier(&static_call_module_nb);
-#endif
-       return 0;
-}
-early_initcall(static_call_init);
 
 long __static_call_return0(void)
 {
        return 0;
 }
 EXPORT_SYMBOL_GPL(__static_call_return0);
-
-#ifdef CONFIG_STATIC_CALL_SELFTEST
-
-static int func_a(int x)
-{
-       return x+1;
-}
-
-static int func_b(int x)
-{
-       return x+2;
-}
-
-DEFINE_STATIC_CALL(sc_selftest, func_a);
-
-static struct static_call_data {
-      int (*func)(int);
-      int val;
-      int expect;
-} static_call_data [] __initdata = {
-      { NULL,   2, 3 },
-      { func_b, 2, 4 },
-      { func_a, 2, 3 }
-};
-
-static int __init test_static_call_init(void)
-{
-      int i;
-
-      for (i = 0; i < ARRAY_SIZE(static_call_data); i++ ) {
-             struct static_call_data *scd = &static_call_data[i];
-
-              if (scd->func)
-                      static_call_update(sc_selftest, scd->func);
-
-              WARN_ON(static_call(sc_selftest)(scd->val) != scd->expect);
-      }
-
-      return 0;
-}
-early_initcall(test_static_call_init);
-
-#endif /* CONFIG_STATIC_CALL_SELFTEST */
diff --git a/kernel/static_call_inline.c b/kernel/static_call_inline.c
new file mode 100644
index 0000000..dc5665b
--- /dev/null
+++ b/kernel/static_call_inline.c
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/static_call.h>
+#include <linux/bug.h>
+#include <linux/smp.h>
+#include <linux/sort.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/processor.h>
+#include <asm/sections.h>
+
+extern struct static_call_site __start_static_call_sites[],
+                              __stop_static_call_sites[];
+extern struct static_call_tramp_key __start_static_call_tramp_key[],
+                                   __stop_static_call_tramp_key[];
+
+static bool static_call_initialized;
+
+/* mutex to protect key modules/sites */
+static DEFINE_MUTEX(static_call_mutex);
+
+static void static_call_lock(void)
+{
+       mutex_lock(&static_call_mutex);
+}
+
+static void static_call_unlock(void)
+{
+       mutex_unlock(&static_call_mutex);
+}
+
+static inline void *static_call_addr(struct static_call_site *site)
+{
+       return (void *)((long)site->addr + (long)&site->addr);
+}
+
+static inline unsigned long __static_call_key(const struct static_call_site *site)
+{
+       return (long)site->key + (long)&site->key;
+}
+
+static inline struct static_call_key *static_call_key(const struct static_call_site *site)
+{
+       return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS);
+}
+
+/* These assume the key is word-aligned. */
+static inline bool static_call_is_init(struct static_call_site *site)
+{
+       return __static_call_key(site) & STATIC_CALL_SITE_INIT;
+}
+
+static inline bool static_call_is_tail(struct static_call_site *site)
+{
+       return __static_call_key(site) & STATIC_CALL_SITE_TAIL;
+}
+
+static inline void static_call_set_init(struct static_call_site *site)
+{
+       site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) -
+                   (long)&site->key;
+}
+
+static int static_call_site_cmp(const void *_a, const void *_b)
+{
+       const struct static_call_site *a = _a;
+       const struct static_call_site *b = _b;
+       const struct static_call_key *key_a = static_call_key(a);
+       const struct static_call_key *key_b = static_call_key(b);
+
+       if (key_a < key_b)
+               return -1;
+
+       if (key_a > key_b)
+               return 1;
+
+       return 0;
+}
+
+static void static_call_site_swap(void *_a, void *_b, int size)
+{
+       long delta = (unsigned long)_a - (unsigned long)_b;
+       struct static_call_site *a = _a;
+       struct static_call_site *b = _b;
+       struct static_call_site tmp = *a;
+
+       a->addr = b->addr  - delta;
+       a->key  = b->key   - delta;
+
+       b->addr = tmp.addr + delta;
+       b->key  = tmp.key  + delta;
+}
+
+static inline void static_call_sort_entries(struct static_call_site *start,
+                                           struct static_call_site *stop)
+{
+       sort(start, stop - start, sizeof(struct static_call_site),
+            static_call_site_cmp, static_call_site_swap);
+}
+
+static inline bool static_call_key_has_mods(struct static_call_key *key)
+{
+       return !(key->type & 1);
+}
+
+static inline struct static_call_mod *static_call_key_next(struct static_call_key *key)
+{
+       if (!static_call_key_has_mods(key))
+               return NULL;
+
+       return key->mods;
+}
+
+static inline struct static_call_site *static_call_key_sites(struct static_call_key *key)
+{
+       if (static_call_key_has_mods(key))
+               return NULL;
+
+       return (struct static_call_site *)(key->type & ~1);
+}
+
+void __static_call_update(struct static_call_key *key, void *tramp, void *func)
+{
+       struct static_call_site *site, *stop;
+       struct static_call_mod *site_mod, first;
+
+       cpus_read_lock();
+       static_call_lock();
+
+       if (key->func == func)
+               goto done;
+
+       key->func = func;
+
+       arch_static_call_transform(NULL, tramp, func, false);
+
+       /*
+        * If uninitialized, we'll not update the callsites, but they still
+        * point to the trampoline and we just patched that.
+        */
+       if (WARN_ON_ONCE(!static_call_initialized))
+               goto done;
+
+       first = (struct static_call_mod){
+               .next = static_call_key_next(key),
+               .mod = NULL,
+               .sites = static_call_key_sites(key),
+       };
+
+       for (site_mod = &first; site_mod; site_mod = site_mod->next) {
+               bool init = system_state < SYSTEM_RUNNING;
+               struct module *mod = site_mod->mod;
+
+               if (!site_mod->sites) {
+                       /*
+                        * This can happen if the static call key is defined in
+                        * a module which doesn't use it.
+                        *
+                        * It also happens in the has_mods case, where the
+                        * 'first' entry has no sites associated with it.
+                        */
+                       continue;
+               }
+
+               stop = __stop_static_call_sites;
+
+               if (mod) {
+#ifdef CONFIG_MODULES
+                       stop = mod->static_call_sites +
+                              mod->num_static_call_sites;
+                       init = mod->state == MODULE_STATE_COMING;
+#endif
+               }
+
+               for (site = site_mod->sites;
+                    site < stop && static_call_key(site) == key; site++) {
+                       void *site_addr = static_call_addr(site);
+
+                       if (!init && static_call_is_init(site))
+                               continue;
+
+                       if (!kernel_text_address((unsigned long)site_addr)) {
+                               /*
+                                * This skips patching built-in __exit, which
+                                * is part of init_section_contains() but is
+                                * not part of kernel_text_address().
+                                *
+                                * Skipping built-in __exit is fine since it
+                                * will never be executed.
+                                */
+                               WARN_ONCE(!static_call_is_init(site),
+                                         "can't patch static call site at %pS",
+                                         site_addr);
+                               continue;
+                       }
+
+                       arch_static_call_transform(site_addr, NULL, func,
+                                                  static_call_is_tail(site));
+               }
+       }
+
+done:
+       static_call_unlock();
+       cpus_read_unlock();
+}
+EXPORT_SYMBOL_GPL(__static_call_update);
+
+static int __static_call_init(struct module *mod,
+                             struct static_call_site *start,
+                             struct static_call_site *stop)
+{
+       struct static_call_site *site;
+       struct static_call_key *key, *prev_key = NULL;
+       struct static_call_mod *site_mod;
+
+       if (start == stop)
+               return 0;
+
+       static_call_sort_entries(start, stop);
+
+       for (site = start; site < stop; site++) {
+               void *site_addr = static_call_addr(site);
+
+               if ((mod && within_module_init((unsigned long)site_addr, mod)) ||
+                   (!mod && init_section_contains(site_addr, 1)))
+                       static_call_set_init(site);
+
+               key = static_call_key(site);
+               if (key != prev_key) {
+                       prev_key = key;
+
+                       /*
+                        * For vmlinux (!mod) avoid the allocation by storing
+                        * the sites pointer in the key itself. Also see
+                        * __static_call_update()'s @first.
+                        *
+                        * This allows architectures (eg. x86) to call
+                        * static_call_init() before memory allocation works.
+                        */
+                       if (!mod) {
+                               key->sites = site;
+                               key->type |= 1;
+                               goto do_transform;
+                       }
+
+                       site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
+                       if (!site_mod)
+                               return -ENOMEM;
+
+                       /*
+                        * When the key has a direct sites pointer, extract
+                        * that into an explicit struct static_call_mod, so we
+                        * can have a list of modules.
+                        */
+                       if (static_call_key_sites(key)) {
+                               site_mod->mod = NULL;
+                               site_mod->next = NULL;
+                               site_mod->sites = static_call_key_sites(key);
+
+                               key->mods = site_mod;
+
+                               site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
+                               if (!site_mod)
+                                       return -ENOMEM;
+                       }
+
+                       site_mod->mod = mod;
+                       site_mod->sites = site;
+                       site_mod->next = static_call_key_next(key);
+                       key->mods = site_mod;
+               }
+
+do_transform:
+               arch_static_call_transform(site_addr, NULL, key->func,
+                               static_call_is_tail(site));
+       }
+
+       return 0;
+}
+
+static int addr_conflict(struct static_call_site *site, void *start, void *end)
+{
+       unsigned long addr = (unsigned long)static_call_addr(site);
+
+       if (addr <= (unsigned long)end &&
+           addr + CALL_INSN_SIZE > (unsigned long)start)
+               return 1;
+
+       return 0;
+}
+
+static int __static_call_text_reserved(struct static_call_site *iter_start,
+                                      struct static_call_site *iter_stop,
+                                      void *start, void *end, bool init)
+{
+       struct static_call_site *iter = iter_start;
+
+       while (iter < iter_stop) {
+               if (init || !static_call_is_init(iter)) {
+                       if (addr_conflict(iter, start, end))
+                               return 1;
+               }
+               iter++;
+       }
+
+       return 0;
+}
+
+#ifdef CONFIG_MODULES
+
+static int __static_call_mod_text_reserved(void *start, void *end)
+{
+       struct module *mod;
+       int ret;
+
+       preempt_disable();
+       mod = __module_text_address((unsigned long)start);
+       WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
+       if (!try_module_get(mod))
+               mod = NULL;
+       preempt_enable();
+
+       if (!mod)
+               return 0;
+
+       ret = __static_call_text_reserved(mod->static_call_sites,
+                       mod->static_call_sites + mod->num_static_call_sites,
+                       start, end, mod->state == MODULE_STATE_COMING);
+
+       module_put(mod);
+
+       return ret;
+}
+
+static unsigned long tramp_key_lookup(unsigned long addr)
+{
+       struct static_call_tramp_key *start = __start_static_call_tramp_key;
+       struct static_call_tramp_key *stop = __stop_static_call_tramp_key;
+       struct static_call_tramp_key *tramp_key;
+
+       for (tramp_key = start; tramp_key != stop; tramp_key++) {
+               unsigned long tramp;
+
+               tramp = (long)tramp_key->tramp + (long)&tramp_key->tramp;
+               if (tramp == addr)
+                       return (long)tramp_key->key + (long)&tramp_key->key;
+       }
+
+       return 0;
+}
+
+static int static_call_add_module(struct module *mod)
+{
+       struct static_call_site *start = mod->static_call_sites;
+       struct static_call_site *stop = start + mod->num_static_call_sites;
+       struct static_call_site *site;
+
+       for (site = start; site != stop; site++) {
+               unsigned long s_key = __static_call_key(site);
+               unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
+               unsigned long key;
+
+               /*
+                * If the key is exported, 'addr' points to the key, which
+                * means modules are allowed to call static_call_update() on
+                * it.
+                *
+                * Otherwise, the key isn't exported, and 'addr' points to the
+                * trampoline so we need to lookup the key.
+                *
+                * We go through this dance to prevent crazy modules from
+                * abusing sensitive static calls.
+                */
+               if (!kernel_text_address(addr))
+                       continue;
+
+               key = tramp_key_lookup(addr);
+               if (!key) {
+                       pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n",
+                               static_call_addr(site));
+                       return -EINVAL;
+               }
+
+               key |= s_key & STATIC_CALL_SITE_FLAGS;
+               site->key = key - (long)&site->key;
+       }
+
+       return __static_call_init(mod, start, stop);
+}
+
+static void static_call_del_module(struct module *mod)
+{
+       struct static_call_site *start = mod->static_call_sites;
+       struct static_call_site *stop = mod->static_call_sites +
+                                       mod->num_static_call_sites;
+       struct static_call_key *key, *prev_key = NULL;
+       struct static_call_mod *site_mod, **prev;
+       struct static_call_site *site;
+
+       for (site = start; site < stop; site++) {
+               key = static_call_key(site);
+               if (key == prev_key)
+                       continue;
+
+               prev_key = key;
+
+               for (prev = &key->mods, site_mod = key->mods;
+                    site_mod && site_mod->mod != mod;
+                    prev = &site_mod->next, site_mod = site_mod->next)
+                       ;
+
+               if (!site_mod)
+                       continue;
+
+               *prev = site_mod->next;
+               kfree(site_mod);
+       }
+}
+
+static int static_call_module_notify(struct notifier_block *nb,
+                                    unsigned long val, void *data)
+{
+       struct module *mod = data;
+       int ret = 0;
+
+       cpus_read_lock();
+       static_call_lock();
+
+       switch (val) {
+       case MODULE_STATE_COMING:
+               ret = static_call_add_module(mod);
+               if (ret) {
+                       WARN(1, "Failed to allocate memory for static calls");
+                       static_call_del_module(mod);
+               }
+               break;
+       case MODULE_STATE_GOING:
+               static_call_del_module(mod);
+               break;
+       }
+
+       static_call_unlock();
+       cpus_read_unlock();
+
+       return notifier_from_errno(ret);
+}
+
+static struct notifier_block static_call_module_nb = {
+       .notifier_call = static_call_module_notify,
+};
+
+#else
+
+static inline int __static_call_mod_text_reserved(void *start, void *end)
+{
+       return 0;
+}
+
+#endif /* CONFIG_MODULES */
+
+int static_call_text_reserved(void *start, void *end)
+{
+       bool init = system_state < SYSTEM_RUNNING;
+       int ret = __static_call_text_reserved(__start_static_call_sites,
+                       __stop_static_call_sites, start, end, init);
+
+       if (ret)
+               return ret;
+
+       return __static_call_mod_text_reserved(start, end);
+}
+
+int __init static_call_init(void)
+{
+       int ret;
+
+       if (static_call_initialized)
+               return 0;
+
+       cpus_read_lock();
+       static_call_lock();
+       ret = __static_call_init(NULL, __start_static_call_sites,
+                                __stop_static_call_sites);
+       static_call_unlock();
+       cpus_read_unlock();
+
+       if (ret) {
+               pr_err("Failed to allocate memory for static_call!\n");
+               BUG();
+       }
+
+       static_call_initialized = true;
+
+#ifdef CONFIG_MODULES
+       register_module_notifier(&static_call_module_nb);
+#endif
+       return 0;
+}
+early_initcall(static_call_init);
+
+#ifdef CONFIG_STATIC_CALL_SELFTEST
+
+static int func_a(int x)
+{
+       return x+1;
+}
+
+static int func_b(int x)
+{
+       return x+2;
+}
+
+DEFINE_STATIC_CALL(sc_selftest, func_a);
+
+static struct static_call_data {
+      int (*func)(int);
+      int val;
+      int expect;
+} static_call_data [] __initdata = {
+      { NULL,   2, 3 },
+      { func_b, 2, 4 },
+      { func_a, 2, 3 }
+};
+
+static int __init test_static_call_init(void)
+{
+      int i;
+
+      for (i = 0; i < ARRAY_SIZE(static_call_data); i++) {
+             struct static_call_data *scd = &static_call_data[i];
+
+              if (scd->func)
+                      static_call_update(sc_selftest, scd->func);
+
+              WARN_ON(static_call(sc_selftest)(scd->val) != scd->expect);
+      }
+
+      return 0;
+}
+early_initcall(test_static_call_init);
+
+#endif /* CONFIG_STATIC_CALL_SELFTEST */
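
The inline-patching machinery moves wholesale to kernel/static_call_inline.c, leaving kernel/static_call.c with only __static_call_return0() so that the latter can be built independently of inline static-call support. The caller-facing API is unchanged; the selftest above already exercises it, and the basic pattern looks like this (a sketch reusing the API shown in this file, with made-up function names):

	static int slow_path(int x) { return x + 1; }
	static int fast_path(int x) { return x + 2; }

	DEFINE_STATIC_CALL(my_call, slow_path);

	int do_call(int v)
	{
		/* compiles to a direct call to the current target */
		return static_call(my_call)(v);
	}

	static void go_fast(void)
	{
		/* patches every call site over to fast_path */
		static_call_update(my_call, fast_path);
	}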
index 2d76c91b85de428aac7bd6627533170f6697ad56..d257721c68b8fc74d85601b713cded1bffded8d7 100644
@@ -188,7 +188,7 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
         */
        if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
 #ifdef CONFIG_NO_HZ_FULL
-               WARN_ON(tick_nohz_full_running);
+               WARN_ON_ONCE(tick_nohz_full_running);
 #endif
                tick_do_timer_cpu = cpu;
        }
@@ -1538,7 +1538,7 @@ void tick_cancel_sched_timer(int cpu)
 }
 #endif
 
-/**
+/*
  * Async notification about clocksource changes
  */
 void tick_clock_notify(void)
@@ -1559,7 +1559,7 @@ void tick_oneshot_notify(void)
        set_bit(0, &ts->check_clocks);
 }
 
-/**
+/*
  * Check, if a change happened, which makes oneshot possible.
  *
  * Called cyclic from the hrtimer softirq (driven by the timer
index 85f1021ad45955026eba845727e53c87693ed2db..9dd2a39cb3b0058e4756ea129cd968195010c00e 100644
@@ -1722,11 +1722,14 @@ static inline void __run_timers(struct timer_base *base)
               time_after_eq(jiffies, base->next_expiry)) {
                levels = collect_expired_timers(base, heads);
                /*
-                * The only possible reason for not finding any expired
-                * timer at this clk is that all matching timers have been
-                * dequeued.
+                * The two possible reasons for not finding any expired
+                * timer at this clk are that all matching timers have been
+                * dequeued, or that no timer has been queued since
+                * base::next_expiry was set to base::clk +
+                * NEXT_TIMER_MAX_DELTA.
                 */
-               WARN_ON_ONCE(!levels && !base->next_expiry_recalc);
+               WARN_ON_ONCE(!levels && !base->next_expiry_recalc
+                            && base->timers_pending);
                base->clk++;
                base->next_expiry = __next_timer_interrupt(base);
 
index 9bb54c0b3b2daaf71cee59a20abd520f95d89a2d..2c43e327a619f77b4e5124851f10f58d643f8f46 100644
@@ -767,6 +767,7 @@ config USER_EVENTS
        bool "User trace events"
        select TRACING
        select DYNAMIC_EVENTS
+       depends on BROKEN || COMPILE_TEST # API needs to be straightened out
        help
          User trace events are user-defined trace events that
          can be used like an existing kernel trace event.  User trace
index 19028e072cdb6a84d42777449c34f24f77f5bdee..8f4fb328133abffc747f7f3794c82ca653a25e5d 100644
@@ -7,6 +7,7 @@
  *
  * Highly modified by Steven Rostedt (VMware).
  */
+#include <linux/jump_label.h>
 #include <linux/suspend.h>
 #include <linux/ftrace.h>
 #include <linux/slab.h>
 #define ASSIGN_OPS_HASH(opsname, val)
 #endif
 
-static bool kill_ftrace_graph;
+DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
 int ftrace_graph_active;
 
 /* Both enabled by default (can be cleared by function_graph tracer flags) */
 static bool fgraph_sleep_time = true;
 
-/**
- * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
- *
- * ftrace_graph_stop() is called when a severe error is detected in
- * the function graph tracing. This function is called by the critical
- * paths of function graph to keep those paths from doing any more harm.
- */
-bool ftrace_graph_is_dead(void)
-{
-       return kill_ftrace_graph;
-}
-
 /**
  * ftrace_graph_stop - set to permanently disable function graph tracing
  *
@@ -51,7 +40,7 @@ bool ftrace_graph_is_dead(void)
  */
 void ftrace_graph_stop(void)
 {
-       kill_ftrace_graph = true;
+       static_branch_enable(&kill_ftrace_graph);
 }
 
 /* Add a function return address to the trace stack on thread info.*/
index 8b3d241a31c2606e99f76a33a94fed8092e40971..706e1686b5eb994fd55f4b54a9de77566849cd12 100644
 #include <linux/tracefs.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
+/* Reminder to move to uapi when everything works */
+#ifdef CONFIG_COMPILE_TEST
+#include <linux/user_events.h>
+#else
 #include <uapi/linux/user_events.h>
+#endif
 #include "trace.h"
 #include "trace_dynevent.h"
 
@@ -42,9 +47,6 @@
 #define MAX_FIELD_ARRAY_SIZE 1024
 #define MAX_FIELD_ARG_NAME 256
 
-#define MAX_BPF_COPY_SIZE PAGE_SIZE
-#define MAX_STACK_BPF_DATA 512
-
 static char *register_page_data;
 
 static DEFINE_MUTEX(reg_mutex);
@@ -405,19 +407,6 @@ parse:
                                    type[0] != 'u', FILTER_OTHER);
 }
 
-static void user_event_parse_flags(struct user_event *user, char *flags)
-{
-       char *flag;
-
-       if (flags == NULL)
-               return;
-
-       while ((flag = strsep(&flags, ",")) != NULL) {
-               if (strcmp(flag, "BPF_ITER") == 0)
-                       user->flags |= FLAG_BPF_ITER;
-       }
-}
-
 static int user_event_parse_fields(struct user_event *user, char *args)
 {
        char *field;
@@ -713,64 +702,14 @@ discard:
 }
 
 #ifdef CONFIG_PERF_EVENTS
-static void user_event_bpf(struct user_event *user, struct iov_iter *i)
-{
-       struct user_bpf_context context;
-       struct user_bpf_iter bpf_i;
-       char fast_data[MAX_STACK_BPF_DATA];
-       void *temp = NULL;
-
-       if ((user->flags & FLAG_BPF_ITER) && iter_is_iovec(i)) {
-               /* Raw iterator */
-               context.data_type = USER_BPF_DATA_ITER;
-               context.data_len = i->count;
-               context.iter = &bpf_i;
-
-               bpf_i.iov_offset = i->iov_offset;
-               bpf_i.iov = i->iov;
-               bpf_i.nr_segs = i->nr_segs;
-       } else if (i->nr_segs == 1 && iter_is_iovec(i)) {
-               /* Single buffer from user */
-               context.data_type = USER_BPF_DATA_USER;
-               context.data_len = i->count;
-               context.udata = i->iov->iov_base + i->iov_offset;
-       } else {
-               /* Multi buffer from user */
-               struct iov_iter copy = *i;
-               size_t copy_size = min_t(size_t, i->count, MAX_BPF_COPY_SIZE);
-
-               context.data_type = USER_BPF_DATA_KERNEL;
-               context.kdata = fast_data;
-
-               if (unlikely(copy_size > sizeof(fast_data))) {
-                       temp = kmalloc(copy_size, GFP_NOWAIT);
-
-                       if (temp)
-                               context.kdata = temp;
-                       else
-                               copy_size = sizeof(fast_data);
-               }
-
-               context.data_len = copy_nofault(context.kdata,
-                                               copy_size, &copy);
-       }
-
-       trace_call_bpf(&user->call, &context);
-
-       kfree(temp);
-}
-
 /*
- * Writes the user supplied payload out to perf ring buffer or eBPF program.
+ * Writes the user supplied payload out to perf ring buffer.
  */
 static void user_event_perf(struct user_event *user, struct iov_iter *i,
                            void *tpdata, bool *faulted)
 {
        struct hlist_head *perf_head;
 
-       if (bpf_prog_array_valid(&user->call))
-               user_event_bpf(user, i);
-
        perf_head = this_cpu_ptr(user->call.perf_events);
 
        if (perf_head && !hlist_empty(perf_head)) {
@@ -1136,8 +1075,6 @@ static int user_event_parse(char *name, char *args, char *flags,
 
        user->tracepoint.name = name;
 
-       user_event_parse_flags(user, flags);
-
        ret = user_event_parse_fields(user, args);
 
        if (ret)
@@ -1165,11 +1102,11 @@ static int user_event_parse(char *name, char *args, char *flags,
 #endif
 
        mutex_lock(&event_mutex);
+
        ret = user_event_trace_register(user);
-       mutex_unlock(&event_mutex);
 
        if (ret)
-               goto put_user;
+               goto put_user_lock;
 
        user->index = index;
 
@@ -1181,8 +1118,12 @@ static int user_event_parse(char *name, char *args, char *flags,
        set_bit(user->index, page_bitmap);
        hash_add(register_table, &user->node, key);
 
+       mutex_unlock(&event_mutex);
+
        *newuser = user;
        return 0;
+put_user_lock:
+       mutex_unlock(&event_mutex);
 put_user:
        user_event_destroy_fields(user);
        user_event_destroy_validators(user);
@@ -1575,9 +1516,6 @@ static int user_seq_show(struct seq_file *m, void *p)
                        busy++;
                }
 
-               if (flags & FLAG_BPF_ITER)
-                       seq_puts(m, " FLAG:BPF_ITER");
-
                seq_puts(m, "\n");
                active++;
        }
index 3990e4df3d7b00eab7539481394de93b9ecf7be2..230038d4f90818843b5ff69274fea1aa14f5d6ac 100644
@@ -370,6 +370,7 @@ static void __put_watch_queue(struct kref *kref)
 
        for (i = 0; i < wqueue->nr_pages; i++)
                __free_page(wqueue->notes[i]);
+       kfree(wqueue->notes);
        bitmap_free(wqueue->notes_bitmap);
 
        wfilter = rcu_access_pointer(wqueue->filter);
index 56fa037501b5606833c727450e623ca50f22b6e9..5f0e71ab292cb07e3650a09665afe977d149c7df 100644
@@ -54,32 +54,6 @@ void kobject_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
                kobj->ktype->get_ownership(kobj, uid, gid);
 }
 
-/*
- * populate_dir - populate directory with attributes.
- * @kobj: object we're working on.
- *
- * Most subsystems have a set of default attributes that are associated
- * with an object that registers with them.  This is a helper called during
- * object registration that loops through the default attributes of the
- * subsystem and creates attributes files for them in sysfs.
- */
-static int populate_dir(struct kobject *kobj)
-{
-       const struct kobj_type *t = get_ktype(kobj);
-       struct attribute *attr;
-       int error = 0;
-       int i;
-
-       if (t && t->default_attrs) {
-               for (i = 0; (attr = t->default_attrs[i]) != NULL; i++) {
-                       error = sysfs_create_file(kobj, attr);
-                       if (error)
-                               break;
-               }
-       }
-       return error;
-}
-
 static int create_dir(struct kobject *kobj)
 {
        const struct kobj_type *ktype = get_ktype(kobj);
@@ -90,12 +64,6 @@ static int create_dir(struct kobject *kobj)
        if (error)
                return error;
 
-       error = populate_dir(kobj);
-       if (error) {
-               sysfs_remove_dir(kobj);
-               return error;
-       }
-
        if (ktype) {
                error = sysfs_create_groups(kobj, ktype->default_groups);
                if (error) {
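
With populate_dir() gone, a ktype is expected to publish its default attributes through ->default_groups instead of the old ->default_attrs array. A sketch of the replacement pattern (the foo_* names are hypothetical; ATTRIBUTE_GROUPS() is the existing sysfs helper that generates foo_groups[] from foo_attrs[]):

	static struct attribute *foo_attrs[] = {
		&foo_bar_attr.attr,	/* hypothetical attribute */
		NULL,
	};
	ATTRIBUTE_GROUPS(foo);		/* emits foo_groups[] from foo_attrs[] */

	static struct kobj_type foo_ktype = {
		.default_groups = foo_groups,
	};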
index 8c3365f26e51d6ddbc76c318ef5092faa1e6c971..b247d412ddef779196746c408dba28f65f239e49 100644
@@ -68,7 +68,7 @@ int logic_iomem_add_region(struct resource *resource,
 }
 EXPORT_SYMBOL(logic_iomem_add_region);
 
-#ifndef CONFIG_LOGIC_IOMEM_FALLBACK
+#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
 static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
 {
        WARN(1, "invalid ioremap(0x%llx, 0x%zx)\n",
@@ -81,7 +81,7 @@ static void real_iounmap(volatile void __iomem *addr)
        WARN(1, "invalid iounmap for addr 0x%llx\n",
             (unsigned long long)(uintptr_t __force)addr);
 }
-#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
+#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
 
 void __iomem *ioremap(phys_addr_t offset, size_t size)
 {
@@ -168,7 +168,7 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
-#ifndef CONFIG_LOGIC_IOMEM_FALLBACK
+#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
 #define MAKE_FALLBACK(op, sz)                                          \
 static u##sz real_raw_read ## op(const volatile void __iomem *addr)    \
 {                                                                      \
@@ -213,7 +213,7 @@ static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer,
        WARN(1, "Invalid memcpy_toio at address 0x%llx\n",
             (unsigned long long)(uintptr_t __force)addr);
 }
-#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
+#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
 
 #define MAKE_OP(op, sz)                                                \
 u##sz __raw_read ## op(const volatile void __iomem *addr)              \
index 926f4823d5eac832a5b6e78c5c08228d400ca5ad..fd1728d94babb28f997bf10c2b2180ae5cdec2c0 100644
@@ -271,8 +271,12 @@ static FORCE_INLINE int LZ4_decompress_generic(
                        ip += length;
                        op += length;
 
-                       /* Necessarily EOF, due to parsing restrictions */
-                       if (!partialDecoding || (cpy == oend))
+                       /* Necessarily EOF when !partialDecoding.
+                        * When partialDecoding, it is EOF if we've either
+                        * filled the output buffer or cannot read the
+                        * 2-byte offset of the following match.
+                        */
+                       if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2)))
                                break;
                } else {
                        /* may overwrite up to WILDCOPYLENGTH beyond cpy */
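
The new ip >= (iend - 2) bound reflects the LZ4 block format: after a literal run, the next item in the stream is a 2-byte little-endian match offset, so with fewer than two input bytes left no further match can be decoded. A sketch of the read the bound protects (LZ4_readLE16() is the existing helper in lz4defs.h; the fragment mirrors the decoder loop):

	/* a match is introduced by a 2-byte little-endian offset */
	if (ip >= iend - 2)
		break;			/* input exhausted, nothing can follow */
	offset = LZ4_readLE16(ip);	/* read the match offset */
	ip += 2;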
index 2eb3de18ded3ea7dea97494d16aae13a2107e852..ae4fd4de9ebe781603e27c4c0a3a67a5ba0477df 100644
@@ -110,7 +110,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
                sb->alloc_hint = NULL;
        }
 
-       sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
+       sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
        if (!sb->map) {
                free_percpu(sb->alloc_hint);
                return -ENOMEM;
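
kcalloc_node() needs physically contiguous memory and may fail for large map arrays, whereas kvzalloc_node() can fall back to vmalloc. The kv* helpers roughly follow this idiom (a simplified sketch, not the actual mm/util.c implementation):

	static void *kv_zalloc_node_sketch(size_t size, gfp_t flags, int node)
	{
		/* try the slab allocator first, without retrying hard or warning */
		void *p = kzalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);

		if (!p)
			p = vzalloc_node(size, node); /* virtually contiguous fallback */
		return p;
	}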
index 8b1c318189ce801a0935133b4882556fede5bfd2..e77d4856442c3f750434e37819e13688d210048e 100644
@@ -1463,6 +1463,25 @@ unlock:
        XA_BUG_ON(xa, !xa_empty(xa));
 }
 
+static noinline void check_create_range_5(struct xarray *xa,
+               unsigned long index, unsigned int order)
+{
+       XA_STATE_ORDER(xas, xa, index, order);
+       unsigned int i;
+
+       xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+
+       for (i = 0; i < order + 10; i++) {
+               do {
+                       xas_lock(&xas);
+                       xas_create_range(&xas);
+                       xas_unlock(&xas);
+               } while (xas_nomem(&xas, GFP_KERNEL));
+       }
+
+       xa_destroy(xa);
+}
+
 static noinline void check_create_range(struct xarray *xa)
 {
        unsigned int order;
@@ -1490,6 +1509,9 @@ static noinline void check_create_range(struct xarray *xa)
                check_create_range_4(xa, (3U << order) + 1, order);
                check_create_range_4(xa, (3U << order) - 1, order);
                check_create_range_4(xa, (1U << 24) + 1, order);
+
+               check_create_range_5(xa, 0, order);
+               check_create_range_5(xa, (1U << order), order);
        }
 
        check_create_range_3();
index b95e92598b9c4ed00b1a47f28d38b694ac8f5b81..4acc88ea7c21744e6b1faa54af6f2029545a1b32 100644
@@ -722,6 +722,8 @@ void xas_create_range(struct xa_state *xas)
 
                for (;;) {
                        struct xa_node *node = xas->xa_node;
+                       if (node->shift >= shift)
+                               break;
                        xas->xa_node = xa_parent_locked(xas->xa, node);
                        xas->xa_offset = node->offset - 1;
                        if (node->offset != 0)
@@ -1079,6 +1081,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
                                        xa_mk_node(child));
                        if (xa_is_value(curr))
                                values--;
+                       xas_update(xas, child);
                } else {
                        unsigned int canon = offset - xas->xa_sibs;
 
@@ -1093,6 +1096,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
        } while (offset-- > xas->xa_offset);
 
        node->nr_values += values;
+       xas_update(xas, node);
 }
 EXPORT_SYMBOL_GPL(xas_split);
 #endif
index 907fefde25728039cb3d298c2441099c819c335e..4b8eab4b3f456c3b3c69a299a720bb9b496c9ff6 100644
@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(balloon_page_dequeue);
 
 #ifdef CONFIG_BALLOON_COMPACTION
 
-bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
+static bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
 
 {
        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
@@ -217,7 +217,7 @@ bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
        return true;
 }
 
-void balloon_page_putback(struct page *page)
+static void balloon_page_putback(struct page *page)
 {
        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
        unsigned long flags;
@@ -230,7 +230,7 @@ void balloon_page_putback(struct page *page)
 
 
 /* move_to_new_page() counterpart for a ballooned page */
-int balloon_page_migrate(struct address_space *mapping,
+static int balloon_page_migrate(struct address_space *mapping,
                struct page *newpage, struct page *page,
                enum migrate_mode mode)
 {
index c3e37aa9ff9e43336cbab1f3a442d016d1dfa75d..fe915db6149b9c4c74a775b1d018b09b8d0fd68d 100644
 #include "internal.h"
 
 #ifdef CONFIG_COMPACTION
+/*
+ * Fragmentation score check interval for proactive compaction purposes.
+ */
+#define HPAGE_FRAG_CHECK_INTERVAL_MSEC (500)
+
 static inline void count_compact_event(enum vm_event_item item)
 {
        count_vm_event(item);
@@ -50,11 +55,6 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
 #define pageblock_start_pfn(pfn)       block_start_pfn(pfn, pageblock_order)
 #define pageblock_end_pfn(pfn)         block_end_pfn(pfn, pageblock_order)
 
-/*
- * Fragmentation score check interval for proactive compaction purposes.
- */
-static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;
-
 /*
  * Page order with-respect-to which proactive compaction
  * calculates external fragmentation, which is used as
index c1e0fed4e877f291b1ad8ef363e98dce6d0e8ecb..5ce8d7c867f0410de06a32b769ed0f49ca5316fb 100644
@@ -1019,12 +1019,15 @@ static int kdamond_wait_activation(struct damon_ctx *ctx)
        struct damos *s;
        unsigned long wait_time;
        unsigned long min_wait_time = 0;
+       bool init_wait_time = false;
 
        while (!kdamond_need_stop(ctx)) {
                damon_for_each_scheme(s, ctx) {
                        wait_time = damos_wmark_wait_us(s);
-                       if (!min_wait_time || wait_time < min_wait_time)
+                       if (!init_wait_time || wait_time < min_wait_time) {
+                               init_wait_time = true;
                                min_wait_time = wait_time;
+                       }
                }
                if (!min_wait_time)
                        return 0;
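
The dedicated init_wait_time flag matters because damos_wmark_wait_us()
may legitimately return 0 (no waiting needed): with the old
"!min_wait_time" test, a zero from one scheme was silently overwritten
by a later scheme's nonzero wait.  The general pattern, as a
self-contained sketch:

	static unsigned long min_wait(const unsigned long *wait, int n)
	{
		unsigned long min = 0;
		bool seen = false;
		int i;

		for (i = 0; i < n; i++) {
			/* zero is a valid minimum, so "not yet set"
			 * needs its own flag */
			if (!seen || wait[i] < min) {
				seen = true;
				min = wait[i];
			}
		}
		return min;
	}
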
index 647d72bf23b6385f8ecebdbf5e83ead7f25b3c02..9a1eef6c5d350e6fadb77be23a48c34818d6310e 100644 (file)
@@ -1063,12 +1063,6 @@ void __init pagecache_init(void)
                init_waitqueue_head(&folio_wait_table[i]);
 
        page_writeback_init();
-
-       /*
-        * tmpfs uses the ZERO_PAGE for reading holes: it is up-to-date,
-        * and splice's page_cache_pipe_buf_confirm() needs to see that.
-        */
-       SetPageUptodate(ZERO_PAGE(0));
 }
 
 /*
@@ -2538,7 +2532,7 @@ static int filemap_create_folio(struct file *file,
         * the page cache as the locked folio would then be enough to
         * synchronize with hole punching. But there are code paths
         * such as filemap_update_page() filling in partially uptodate
-        * pages or ->readpages() that need to hold invalidate_lock
+        * pages or ->readahead() that need to hold invalidate_lock
         * while mapping blocks for IO so let's hold the lock here as
         * well to keep locking rules simple.
         */
@@ -3752,9 +3746,10 @@ out:
 }
 EXPORT_SYMBOL(generic_file_direct_write);
 
-ssize_t generic_perform_write(struct file *file,
-                               struct iov_iter *i, loff_t pos)
+ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
 {
+       struct file *file = iocb->ki_filp;
+       loff_t pos = iocb->ki_pos;
        struct address_space *mapping = file->f_mapping;
        const struct address_space_operations *a_ops = mapping->a_ops;
        long status = 0;
@@ -3884,7 +3879,8 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
                        goto out;
 
-               status = generic_perform_write(file, from, pos = iocb->ki_pos);
+               pos = iocb->ki_pos;
+               status = generic_perform_write(iocb, from);
                /*
                 * If generic_perform_write() returned a synchronous error
                 * then we want to return the number of bytes which were
@@ -3916,7 +3912,7 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                         */
                }
        } else {
-               written = generic_perform_write(file, from, iocb->ki_pos);
+               written = generic_perform_write(iocb, from);
                if (likely(written > 0))
                        iocb->ki_pos += written;
        }
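
Filesystems that call generic_perform_write() directly now pass the
kiocb; a hypothetical buffered-write path (myfs_* names are
illustrative, not from this series) would look roughly like:

	static ssize_t myfs_buffered_write_iter(struct kiocb *iocb,
						struct iov_iter *from)
	{
		struct inode *inode = file_inode(iocb->ki_filp);
		ssize_t ret;

		inode_lock(inode);
		ret = generic_write_checks(iocb, from);
		if (ret > 0)
			/* write position is taken from iocb->ki_pos */
			ret = generic_perform_write(iocb, from);
		inode_unlock(inode);
		if (ret > 0)
			iocb->ki_pos += ret;	/* callers still advance the position */
		return ret;
	}
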
index 271fbe8195d712d8babc3375859de1271b472687..f598a037eb04f75799cb7adba885c96d1a2e1ec3 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1404,6 +1404,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
        struct mm_struct *mm = vma->vm_mm;
        unsigned long nr_pages = (end - start) / PAGE_SIZE;
        int gup_flags;
+       long ret;
 
        VM_BUG_ON(!PAGE_ALIGNED(start));
        VM_BUG_ON(!PAGE_ALIGNED(end));
@@ -1438,8 +1439,10 @@ long populate_vma_page_range(struct vm_area_struct *vma,
         * We made sure addr is within a VMA, so the following will
         * not result in a stack expansion that recurses back here.
         */
-       return __get_user_pages(mm, start, nr_pages, gup_flags,
+       ret = __get_user_pages(mm, start, nr_pages, gup_flags,
                                NULL, NULL, locked);
+       lru_add_drain();
+       return ret;
 }
 
 /*
@@ -1471,6 +1474,7 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
        struct mm_struct *mm = vma->vm_mm;
        unsigned long nr_pages = (end - start) / PAGE_SIZE;
        int gup_flags;
+       long ret;
 
        VM_BUG_ON(!PAGE_ALIGNED(start));
        VM_BUG_ON(!PAGE_ALIGNED(end));
@@ -1498,8 +1502,10 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
        if (check_vma_flags(vma, gup_flags))
                return -EINVAL;
 
-       return __get_user_pages(mm, start, nr_pages, gup_flags,
+       ret = __get_user_pages(mm, start, nr_pages, gup_flags,
                                NULL, NULL, locked);
+       lru_add_drain();
+       return ret;
 }
 
 /*
index 0cc0c4da7ed9fdb89db9cc93d94c7bceb56b148a..1a692997fac4c9c913279a10d2077d20ddcb0751 100644 (file)
@@ -624,7 +624,7 @@ void __kmap_local_sched_out(void)
 
                /* With debug all even slots are unmapped and act as guard */
                if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
-                       WARN_ON_ONCE(!pte_none(pteval));
+                       WARN_ON_ONCE(pte_val(pteval) != 0);
                        continue;
                }
                if (WARN_ON_ONCE(pte_none(pteval)))
@@ -661,7 +661,7 @@ void __kmap_local_sched_in(void)
 
                /* With debug all even slots are unmapped and act as guard */
                if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
-                       WARN_ON_ONCE(!pte_none(pteval));
+                       WARN_ON_ONCE(pte_val(pteval) != 0);
                        continue;
                }
                if (WARN_ON_ONCE(pte_none(pteval)))
index 2fe38212e07c665b7a7fe29d575598f371e183c5..c468fee595ffa49952952da40d7a2d70703ac0cc 100644 (file)
@@ -2145,15 +2145,14 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
         * pmd against. Otherwise we can end up replacing wrong folio.
         */
        VM_BUG_ON(freeze && !folio);
-       if (folio) {
-               VM_WARN_ON_ONCE(!folio_test_locked(folio));
-               if (folio != page_folio(pmd_page(*pmd)))
-                       goto out;
-       }
+       VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
 
        if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
-           is_pmd_migration_entry(*pmd))
+           is_pmd_migration_entry(*pmd)) {
+               if (folio && folio != page_folio(pmd_page(*pmd)))
+                       goto out;
                __split_huge_pmd_locked(vma, pmd, range.start, freeze);
+       }
 
 out:
        spin_unlock(ptl);
index b34f50156f7ec29cdfa23006be920afac8954a2b..f8ca7cca3c1ab0f9dea7b67081f55bbdd0e2d0f0 100644 (file)
@@ -3475,7 +3475,6 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
 {
        int nr_nodes, node;
        struct page *page;
-       int rc = 0;
 
        lockdep_assert_held(&hugetlb_lock);
 
@@ -3486,15 +3485,19 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
        }
 
        for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
-               if (!list_empty(&h->hugepage_freelists[node])) {
-                       page = list_entry(h->hugepage_freelists[node].next,
-                                       struct page, lru);
-                       rc = demote_free_huge_page(h, page);
-                       break;
+               list_for_each_entry(page, &h->hugepage_freelists[node], lru) {
+                       if (PageHWPoison(page))
+                               continue;
+
+                       return demote_free_huge_page(h, page);
                }
        }
 
-       return rc;
+       /*
+        * The only way to get here is if all pages on the free lists are
+        * poisoned.  Return -EBUSY so that the caller will not retry.
+        */
+       return -EBUSY;
 }
 
 #define HSTATE_ATTR_RO(_name) \
index 58dc6adc19c5edd0e5aeb987b6155cfb8326530e..cf16280ce132187e5652c5d9088bac716f879a27 100644 (file)
@@ -456,7 +456,8 @@ static inline void munlock_vma_page(struct page *page,
 }
 void mlock_new_page(struct page *page);
 bool need_mlock_page_drain(int cpu);
-void mlock_page_drain(int cpu);
+void mlock_page_drain_local(void);
+void mlock_page_drain_remote(int cpu);
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
@@ -539,7 +540,8 @@ static inline void munlock_vma_page(struct page *page,
                        struct vm_area_struct *vma, bool compound) { }
 static inline void mlock_new_page(struct page *page) { }
 static inline bool need_mlock_page_drain(int cpu) { return false; }
-static inline void mlock_page_drain(int cpu) { }
+static inline void mlock_page_drain_local(void) { }
+static inline void mlock_page_drain_remote(int cpu) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
 {
 }
index 07a76c46daa5acf8487ff8bfc1bd9cee317773a0..9e1b6544bfa8e6263fc6a9db58d68b345692e591 100644 (file)
@@ -336,8 +336,6 @@ void __kasan_poison_vmalloc(const void *start, unsigned long size)
 
 #endif
 
-#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
-
 void kasan_enable_tagging(void)
 {
        if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC)
@@ -347,6 +345,9 @@ void kasan_enable_tagging(void)
        else
                hw_enable_tagging_sync();
 }
+
+#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
+
 EXPORT_SYMBOL_GPL(kasan_enable_tagging);
 
 void kasan_force_async_fault(void)
index d79b83d673b144773c8cc9c4eee6eaf7ec45ebde..b01b4bbe040958f3e345306de5cfab5cecb32ab3 100644 (file)
@@ -355,25 +355,27 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
 #define hw_set_mem_tag_range(addr, size, tag, init) \
                        arch_set_mem_tag_range((addr), (size), (tag), (init))
 
+void kasan_enable_tagging(void);
+
 #else /* CONFIG_KASAN_HW_TAGS */
 
 #define hw_enable_tagging_sync()
 #define hw_enable_tagging_async()
 #define hw_enable_tagging_asymm()
 
+static inline void kasan_enable_tagging(void) { }
+
 #endif /* CONFIG_KASAN_HW_TAGS */
 
 #if defined(CONFIG_KASAN_HW_TAGS) && IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
 
-void kasan_enable_tagging(void);
 void kasan_force_async_fault(void);
 
-#else /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */
+#else /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */
 
-static inline void kasan_enable_tagging(void) { }
 static inline void kasan_force_async_fault(void) { }
 
-#endif /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */
+#endif /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */
 
 #ifdef CONFIG_KASAN_SW_TAGS
 u8 kasan_random_tag(void);
index 2f9fdfde19416a1c271b3c45c7f1405807e84f77..9b2b5f56f4aeffa42f2fa2d85c46901768128234 100644 (file)
@@ -231,27 +231,6 @@ static bool kfence_unprotect(unsigned long addr)
        return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
 }
 
-static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
-{
-       long index;
-
-       /* The checks do not affect performance; only called from slow-paths. */
-
-       if (!is_kfence_address((void *)addr))
-               return NULL;
-
-       /*
-        * May be an invalid index if called with an address at the edge of
-        * __kfence_pool, in which case we would report an "invalid access"
-        * error.
-        */
-       index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
-       if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
-               return NULL;
-
-       return &kfence_metadata[index];
-}
-
 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
 {
        unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
@@ -566,6 +545,8 @@ static unsigned long kfence_init_pool(void)
         * enters __slab_free() slow-path.
         */
        for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
+               struct slab *slab = page_slab(&pages[i]);
+
                if (!i || (i % 2))
                        continue;
 
@@ -573,7 +554,11 @@ static unsigned long kfence_init_pool(void)
                if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
                        return addr;
 
-               __SetPageSlab(&pages[i]);
+               __folio_set_slab(slab_folio(slab));
+#ifdef CONFIG_MEMCG
+               slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
+                                  MEMCG_DATA_OBJCGS;
+#endif
        }
 
        /*
@@ -1033,6 +1018,9 @@ void __kfence_free(void *addr)
 {
        struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
 
+#ifdef CONFIG_MEMCG
+       KFENCE_WARN_ON(meta->objcg);
+#endif
        /*
         * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
         * the object, as the object page may be recycled for other-typed
index 2a2d5de9d3791624f780193074983e7edd6eb482..600f2e2431d6dc542420ce950956abe46f46d5a7 100644 (file)
@@ -89,10 +89,34 @@ struct kfence_metadata {
        struct kfence_track free_track;
        /* For updating alloc_covered on frees. */
        u32 alloc_stack_hash;
+#ifdef CONFIG_MEMCG
+       struct obj_cgroup *objcg;
+#endif
 };
 
 extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
 
+static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
+{
+       long index;
+
+       /* The checks do not affect performance; only called from slow-paths. */
+
+       if (!is_kfence_address((void *)addr))
+               return NULL;
+
+       /*
+        * May be an invalid index if called with an address at the edge of
+        * __kfence_pool, in which case we would report an "invalid access"
+        * error.
+        */
+       index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
+       if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
+               return NULL;
+
+       return &kfence_metadata[index];
+}
+
 /* KFENCE error types for report generation. */
 enum kfence_error_type {
 	KFENCE_ERROR_OOB,               /* Detected an out-of-bounds access. */
index f93a7b2a338be7d6906f51a22ba2faa10615c6cf..f5a6d8ba3e21fef25cee0ec6b449d9ddd2de646d 100644 (file)
@@ -273,3 +273,50 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r
        /* We encountered a memory safety error, taint the kernel! */
        add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
 }
+
+#ifdef CONFIG_PRINTK
+static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
+{
+       int i, j;
+
+       i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
+       for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
+               kp_stack[j] = (void *)track->stack_entries[i];
+       if (j < KS_ADDRS_COUNT)
+               kp_stack[j] = NULL;
+}
+
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+       struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
+       unsigned long flags;
+
+       if (!meta)
+               return false;
+
+       /*
+        * If state is UNUSED at least show the pointer requested; the rest
+        * would be garbage data.
+        */
+       kpp->kp_ptr = object;
+
+       /* Requesting info on a never-used object is almost certainly a bug. */
+       if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
+               return true;
+
+       raw_spin_lock_irqsave(&meta->lock, flags);
+
+       kpp->kp_slab = slab;
+       kpp->kp_slab_cache = meta->cache;
+       kpp->kp_objp = (void *)meta->addr;
+       kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
+       if (meta->state == KFENCE_OBJECT_FREED)
+               kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
+       /* get_stack_skipnr() ensures the first entry is outside allocator. */
+       kpp->kp_ret = kpp->kp_stack[0];
+
+       raw_spin_unlock_irqrestore(&meta->lock, flags);
+
+       return true;
+}
+#endif
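
__kfence_obj_info() exists so that kmem_dump_obj() can report real
allocation/free stacks for KFENCE-guarded objects.  The common slab code
is expected to try it before the regular allocator's helper, roughly:

	/* sketch of the dispatch in mm/slab_common.c */
	void kmem_obj_info(struct kmem_obj_info *kpp, void *object,
			   struct slab *slab)
	{
		if (__kfence_obj_info(kpp, object, slab))
			return;		/* KFENCE object: info already filled in */
		__kmem_obj_info(kpp, object, slab);	/* SLAB/SLUB/SLOB fallback */
	}
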
index 7580baa76af1c9948aa6af99c52de2429ee10eac..a182f5ddaf68b3bcae74287c1ad33b2a31ccd60d 100644 (file)
@@ -796,6 +796,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
        unsigned long flags;
        struct kmemleak_object *object;
        struct kmemleak_scan_area *area = NULL;
+       unsigned long untagged_ptr;
+       unsigned long untagged_objp;
 
        object = find_and_get_object(ptr, 1);
        if (!object) {
@@ -804,6 +806,9 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
                return;
        }
 
+       untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
+       untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
+
        if (scan_area_cache)
                area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
 
@@ -815,8 +820,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
                goto out_unlock;
        }
        if (size == SIZE_MAX) {
-               size = object->pointer + object->size - ptr;
-       } else if (ptr + size > object->pointer + object->size) {
+               size = untagged_objp + object->size - untagged_ptr;
+       } else if (untagged_ptr + size > untagged_objp + object->size) {
                kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
                dump_object_info(object);
                kmem_cache_free(scan_area_cache, area);
@@ -1127,7 +1132,7 @@ EXPORT_SYMBOL(kmemleak_no_scan);
 void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
                               gfp_t gfp)
 {
-       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+       if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
                kmemleak_alloc(__va(phys), size, min_count, gfp);
 }
 EXPORT_SYMBOL(kmemleak_alloc_phys);
@@ -1141,7 +1146,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
  */
 void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
 {
-       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+       if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
                kmemleak_free_part(__va(phys), size);
 }
 EXPORT_SYMBOL(kmemleak_free_part_phys);
@@ -1153,7 +1158,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
  */
 void __ref kmemleak_not_leak_phys(phys_addr_t phys)
 {
-       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+       if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
                kmemleak_not_leak(__va(phys));
 }
 EXPORT_SYMBOL(kmemleak_not_leak_phys);
@@ -1165,7 +1170,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
  */
 void __ref kmemleak_ignore_phys(phys_addr_t phys)
 {
-       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+       if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
                kmemleak_ignore(__va(phys));
 }
 EXPORT_SYMBOL(kmemleak_ignore_phys);
index c669d87001a63450de08d1170f7a097612267f4f..ba76428ceecea18d6eab5d88c457169559daade6 100644 (file)
@@ -394,12 +394,6 @@ static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
        int dst_idx = dst_memcg->kmemcg_id;
        struct list_lru_one *src, *dst;
 
-       /*
-        * If there is no lru entry in this nlru, we can skip it immediately.
-        */
-       if (!READ_ONCE(nlru->nr_items))
-               return;
-
        /*
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
index b41858ee937bb85800c1ed75ba297c6c6128ed72..1873616a37d2efffbe49804eb498808377d7fa5f 100644 (file)
@@ -1464,16 +1464,9 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
 
        while (iov_iter_count(&iter)) {
                iovec = iov_iter_iovec(&iter);
-               /*
-                * do_madvise returns ENOMEM if unmapped holes are present
-                * in the passed VMA. process_madvise() is expected to skip
-                * unmapped holes passed to it in the 'struct iovec' list
-                * and not fail because of them. Thus treat -ENOMEM return
-                * from do_madvise as valid and continue processing.
-                */
                ret = do_madvise(mm, (unsigned long)iovec.iov_base,
                                        iovec.iov_len, behavior);
-               if (ret < 0 && ret != -ENOMEM)
+               if (ret < 0)
                        break;
                iov_iter_advance(&iter, iovec.iov_len);
        }
index be44d0b36b186c77c2f96635ecad5992a49701ef..76e3af9639d93fa34993693725aac4350febf07d 100644 (file)
@@ -3918,14 +3918,18 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
                return ret;
 
        if (unlikely(PageHWPoison(vmf->page))) {
+               struct page *page = vmf->page;
                vm_fault_t poisonret = VM_FAULT_HWPOISON;
                if (ret & VM_FAULT_LOCKED) {
+                       if (page_mapped(page))
+                               unmap_mapping_pages(page_mapping(page),
+                                                   page->index, 1, false);
                        /* Retry if a clean page was removed from the cache. */
-                       if (invalidate_inode_page(vmf->page))
-                               poisonret = 0;
-                       unlock_page(vmf->page);
+                       if (invalidate_inode_page(page))
+                               poisonret = VM_FAULT_NOPAGE;
+                       unlock_page(page);
                }
-               put_page(vmf->page);
+               put_page(page);
                vmf->page = NULL;
                return poisonret;
        }
index a2516d31db6ca8b3646d95344d5fdc534bbdcedc..8c74107a2b15e008a7cfa24e95811552f0dbf37e 100644 (file)
@@ -1191,8 +1191,10 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
  */
 static struct page *new_page(struct page *page, unsigned long start)
 {
+       struct folio *dst, *src = page_folio(page);
        struct vm_area_struct *vma;
        unsigned long address;
+       gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
 
        vma = find_vma(current->mm, start);
        while (vma) {
@@ -1202,24 +1204,19 @@ static struct page *new_page(struct page *page, unsigned long start)
                vma = vma->vm_next;
        }
 
-       if (PageHuge(page)) {
-               return alloc_huge_page_vma(page_hstate(compound_head(page)),
+       if (folio_test_hugetlb(src))
+               return alloc_huge_page_vma(page_hstate(&src->page),
                                vma, address);
-       } else if (PageTransHuge(page)) {
-               struct page *thp;
 
-               thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
-                                        HPAGE_PMD_ORDER);
-               if (!thp)
-                       return NULL;
-               prep_transhuge_page(thp);
-               return thp;
-       }
+       if (folio_test_large(src))
+               gfp = GFP_TRANSHUGE;
+
        /*
-        * if !vma, alloc_page_vma() will use task or system default policy
+        * if !vma, vma_alloc_folio() will use task or system default policy
         */
-       return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
-                       vma, address);
+       dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
+                       folio_test_large(src));
+       return &dst->page;
 }
 #else
 
@@ -2227,6 +2224,19 @@ out:
 }
 EXPORT_SYMBOL(alloc_pages_vma);
 
+struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
+               unsigned long addr, bool hugepage)
+{
+       struct folio *folio;
+
+       folio = (struct folio *)alloc_pages_vma(gfp, order, vma, addr,
+                       hugepage);
+       if (folio && order > 1)
+               prep_transhuge_page(&folio->page);
+
+       return folio;
+}
+
 /**
  * alloc_pages - Allocate pages.
  * @gfp: GFP flags.
@@ -2733,6 +2743,7 @@ alloc_new:
        mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
        if (!mpol_new)
                goto err_out;
+       atomic_set(&mpol_new->refcnt, 1);
        goto restart;
 }
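
vma_alloc_folio() also calls prep_transhuge_page() itself for order > 1,
so a caller that used to pair alloc_page_vma() with the prep call can
collapse to a single allocation.  A hypothetical helper covering both
cases:

	static struct page *alloc_for_vma(struct vm_area_struct *vma,
					  unsigned long addr, unsigned int order)
	{
		gfp_t gfp = order ? GFP_TRANSHUGE : GFP_HIGHUSER_MOVABLE;
		struct folio *folio;

		folio = vma_alloc_folio(gfp, order, vma, addr, order > 0);
		return folio ? &folio->page : NULL;
	}
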
 
index 3d60823afd2d3171fafe7ec3e3279e7156850ddd..6c31ee1e1c9b061b70bf99a3463fcf9ea07686ed 100644 (file)
@@ -246,7 +246,7 @@ static bool remove_migration_pte(struct folio *folio,
                        set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
                }
                if (vma->vm_flags & VM_LOCKED)
-                       mlock_page_drain(smp_processor_id());
+                       mlock_page_drain_local();
 
                trace_remove_migration_pte(pvmw.address, pte_val(pte),
                                           compound_order(new));
@@ -1520,10 +1520,11 @@ out:
 
 struct page *alloc_migration_target(struct page *page, unsigned long private)
 {
+       struct folio *folio = page_folio(page);
        struct migration_target_control *mtc;
        gfp_t gfp_mask;
        unsigned int order = 0;
-       struct page *new_page = NULL;
+       struct folio *new_folio = NULL;
        int nid;
        int zidx;
 
@@ -1531,34 +1532,31 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
        gfp_mask = mtc->gfp_mask;
        nid = mtc->nid;
        if (nid == NUMA_NO_NODE)
-               nid = page_to_nid(page);
+               nid = folio_nid(folio);
 
-       if (PageHuge(page)) {
-               struct hstate *h = page_hstate(compound_head(page));
+       if (folio_test_hugetlb(folio)) {
+               struct hstate *h = page_hstate(&folio->page);
 
                gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
                return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
        }
 
-       if (PageTransHuge(page)) {
+       if (folio_test_large(folio)) {
                /*
                 * clear __GFP_RECLAIM to make the migration callback
                 * consistent with regular THP allocations.
                 */
                gfp_mask &= ~__GFP_RECLAIM;
                gfp_mask |= GFP_TRANSHUGE;
-               order = HPAGE_PMD_ORDER;
+               order = folio_order(folio);
        }
-       zidx = zone_idx(page_zone(page));
+       zidx = zone_idx(folio_zone(folio));
        if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
                gfp_mask |= __GFP_HIGHMEM;
 
-       new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
-
-       if (new_page && PageTransHuge(new_page))
-               prep_transhuge_page(new_page);
+       new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
 
-       return new_page;
+       return &new_folio->page;
 }
 
 #ifdef CONFIG_NUMA
@@ -1999,32 +1997,20 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
                                           unsigned long data)
 {
        int nid = (int) data;
-       struct page *newpage;
-
-       newpage = __alloc_pages_node(nid,
-                                        (GFP_HIGHUSER_MOVABLE |
-                                         __GFP_THISNODE | __GFP_NOMEMALLOC |
-                                         __GFP_NORETRY | __GFP_NOWARN) &
-                                        ~__GFP_RECLAIM, 0);
-
-       return newpage;
-}
-
-static struct page *alloc_misplaced_dst_page_thp(struct page *page,
-                                                unsigned long data)
-{
-       int nid = (int) data;
-       struct page *newpage;
-
-       newpage = alloc_pages_node(nid, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
-                                  HPAGE_PMD_ORDER);
-       if (!newpage)
-               goto out;
-
-       prep_transhuge_page(newpage);
+       int order = compound_order(page);
+       gfp_t gfp = __GFP_THISNODE;
+       struct folio *new;
+
+       if (order > 0)
+               gfp |= GFP_TRANSHUGE_LIGHT;
+       else {
+               gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
+                       __GFP_NOWARN;
+               gfp &= ~__GFP_RECLAIM;
+       }
+       new = __folio_alloc_node(gfp, order, nid);
 
-out:
-       return newpage;
+       return &new->page;
 }
 
 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
@@ -2082,22 +2068,8 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
        int nr_remaining;
        unsigned int nr_succeeded;
        LIST_HEAD(migratepages);
-       new_page_t *new;
-       bool compound;
        int nr_pages = thp_nr_pages(page);
 
-       /*
-        * PTE mapped THP or HugeTLB page can't reach here so the page could
-        * be either base page or THP.  And it must be head page if it is
-        * THP.
-        */
-       compound = PageTransHuge(page);
-
-       if (compound)
-               new = alloc_misplaced_dst_page_thp;
-       else
-               new = alloc_misplaced_dst_page;
-
        /*
         * Don't migrate file pages that are mapped in multiple processes
         * with execute permissions as they are probably shared libraries.
@@ -2118,9 +2090,9 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
                goto out;
 
        list_add(&page->lru, &migratepages);
-       nr_remaining = migrate_pages(&migratepages, *new, NULL, node,
-                                    MIGRATE_ASYNC, MR_NUMA_MISPLACED,
-                                    &nr_succeeded);
+       nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
+                                    NULL, node, MIGRATE_ASYNC,
+                                    MR_NUMA_MISPLACED, &nr_succeeded);
        if (nr_remaining) {
                if (!list_empty(&migratepages)) {
                        list_del(&page->lru);
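
With the THP-specific allocator gone, one new_page_t callback serves
both base pages and THPs by taking the order from the source page.  A
sketch of the callback contract migrate_pages() expects (names are
hypothetical):

	/* return a destination page for @page, or NULL on failure;
	 * @private carries caller data - here, a target node id */
	static struct page *my_migration_target(struct page *page,
						unsigned long private)
	{
		int nid = (int)private;
		struct folio *new;

		new = __folio_alloc_node(GFP_HIGHUSER_MOVABLE,
					 folio_order(page_folio(page)), nid);
		return new ? &new->page : NULL;
	}
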
index 529fbc1f27c88a010eb11261c90acfaa5663c67b..716caf851043121dcc8bd94542b6ada0a2acd7c7 100644 (file)
 
 #include "internal.h"
 
-static DEFINE_PER_CPU(struct pagevec, mlock_pvec);
+struct mlock_pvec {
+       local_lock_t lock;
+       struct pagevec vec;
+};
+
+static DEFINE_PER_CPU(struct mlock_pvec, mlock_pvec) = {
+       .lock = INIT_LOCAL_LOCK(lock),
+};
 
 bool can_do_mlock(void)
 {
@@ -203,18 +210,30 @@ static void mlock_pagevec(struct pagevec *pvec)
        pagevec_reinit(pvec);
 }
 
-void mlock_page_drain(int cpu)
+void mlock_page_drain_local(void)
+{
+       struct pagevec *pvec;
+
+       local_lock(&mlock_pvec.lock);
+       pvec = this_cpu_ptr(&mlock_pvec.vec);
+       if (pagevec_count(pvec))
+               mlock_pagevec(pvec);
+       local_unlock(&mlock_pvec.lock);
+}
+
+void mlock_page_drain_remote(int cpu)
 {
        struct pagevec *pvec;
 
-       pvec = &per_cpu(mlock_pvec, cpu);
+       WARN_ON_ONCE(cpu_online(cpu));
+       pvec = &per_cpu(mlock_pvec.vec, cpu);
        if (pagevec_count(pvec))
                mlock_pagevec(pvec);
 }
 
 bool need_mlock_page_drain(int cpu)
 {
-       return pagevec_count(&per_cpu(mlock_pvec, cpu));
+       return pagevec_count(&per_cpu(mlock_pvec.vec, cpu));
 }
 
 /**
@@ -223,7 +242,10 @@ bool need_mlock_page_drain(int cpu)
  */
 void mlock_folio(struct folio *folio)
 {
-       struct pagevec *pvec = &get_cpu_var(mlock_pvec);
+       struct pagevec *pvec;
+
+       local_lock(&mlock_pvec.lock);
+       pvec = this_cpu_ptr(&mlock_pvec.vec);
 
        if (!folio_test_set_mlocked(folio)) {
                int nr_pages = folio_nr_pages(folio);
@@ -236,7 +258,7 @@ void mlock_folio(struct folio *folio)
        if (!pagevec_add(pvec, mlock_lru(&folio->page)) ||
            folio_test_large(folio) || lru_cache_disabled())
                mlock_pagevec(pvec);
-       put_cpu_var(mlock_pvec);
+       local_unlock(&mlock_pvec.lock);
 }
 
 /**
@@ -245,9 +267,11 @@ void mlock_folio(struct folio *folio)
  */
 void mlock_new_page(struct page *page)
 {
-       struct pagevec *pvec = &get_cpu_var(mlock_pvec);
+       struct pagevec *pvec;
        int nr_pages = thp_nr_pages(page);
 
+       local_lock(&mlock_pvec.lock);
+       pvec = this_cpu_ptr(&mlock_pvec.vec);
        SetPageMlocked(page);
        mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
        __count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
@@ -256,7 +280,7 @@ void mlock_new_page(struct page *page)
        if (!pagevec_add(pvec, mlock_new(page)) ||
            PageHead(page) || lru_cache_disabled())
                mlock_pagevec(pvec);
-       put_cpu_var(mlock_pvec);
+       local_unlock(&mlock_pvec.lock);
 }
 
 /**
@@ -265,8 +289,10 @@ void mlock_new_page(struct page *page)
  */
 void munlock_page(struct page *page)
 {
-       struct pagevec *pvec = &get_cpu_var(mlock_pvec);
+       struct pagevec *pvec;
 
+       local_lock(&mlock_pvec.lock);
+       pvec = this_cpu_ptr(&mlock_pvec.vec);
        /*
         * TestClearPageMlocked(page) must be left to __munlock_page(),
         * which will check whether the page is multiply mlocked.
@@ -276,7 +302,7 @@ void munlock_page(struct page *page)
        if (!pagevec_add(pvec, page) ||
            PageHead(page) || lru_cache_disabled())
                mlock_pagevec(pvec);
-       put_cpu_var(mlock_pvec);
+       local_unlock(&mlock_pvec.lock);
 }
 
 static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
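
The switch from get_cpu_var() to a local_lock keeps the per-CPU pagevec
safe on PREEMPT_RT (where the lock becomes a per-CPU spinlock) while
costing only a preempt-disable elsewhere.  The shape of the pattern, as
a sketch with hypothetical names:

	struct my_pcpu {
		local_lock_t lock;
		struct pagevec vec;
	};
	static DEFINE_PER_CPU(struct my_pcpu, my_pcpu) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void my_add(struct page *page)
	{
		struct pagevec *pvec;

		local_lock(&my_pcpu.lock);
		pvec = this_cpu_ptr(&my_pcpu.vec);
		if (!pagevec_add(pvec, page))
			my_drain(pvec);		/* hypothetical flush helper */
		local_unlock(&my_pcpu.lock);
	}
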
index 9d76da79594d9084b5ee3a1153243b0d6561afaa..303d3290b938667699e0ab61cbd8e6ac81c286eb 100644 (file)
@@ -486,6 +486,9 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
        pmd_t *old_pmd, *new_pmd;
        pud_t *old_pud, *new_pud;
 
+       if (!len)
+               return 0;
+
        old_end = old_addr + len;
        flush_cache_range(vma, old_addr, old_end);
 
index 6c6af8658775733f345b5528b81f32e77570b5da..33ca8cab21e6ed6bc531af9dbba2bff5aedf8457 100644 (file)
@@ -128,7 +128,7 @@ static DEFINE_MUTEX(pcp_batch_high_lock);
 struct pagesets {
        local_lock_t lock;
 };
-static DEFINE_PER_CPU(struct pagesets, pagesets) __maybe_unused = {
+static DEFINE_PER_CPU(struct pagesets, pagesets) = {
        .lock = INIT_LOCAL_LOCK(lock),
 };
 
@@ -6131,7 +6131,7 @@ static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
        do {
                zone_type--;
                zone = pgdat->node_zones + zone_type;
-               if (managed_zone(zone)) {
+               if (populated_zone(zone)) {
                        zoneref_set_zone(zone, &zonerefs[nr_zones++]);
                        check_highest_zone(zone_type);
                }
@@ -8367,6 +8367,7 @@ static int page_alloc_cpu_dead(unsigned int cpu)
        struct zone *zone;
 
        lru_add_drain_cpu(cpu);
+       mlock_page_drain_remote(cpu);
        drain_pages(cpu);
 
        /*
index b417f000b49eb1aa3b776ba7a030d10828119493..89fbf3cae30f7cf58a0a1b83970ed8b17d627507 100644 (file)
@@ -51,54 +51,6 @@ void end_swap_bio_write(struct bio *bio)
        bio_put(bio);
 }
 
-static void swap_slot_free_notify(struct page *page)
-{
-       struct swap_info_struct *sis;
-       struct gendisk *disk;
-       swp_entry_t entry;
-
-       /*
-        * There is no guarantee that the page is in swap cache - the software
-        * suspend code (at least) uses end_swap_bio_read() against a non-
-        * swapcache page.  So we must check PG_swapcache before proceeding with
-        * this optimization.
-        */
-       if (unlikely(!PageSwapCache(page)))
-               return;
-
-       sis = page_swap_info(page);
-       if (data_race(!(sis->flags & SWP_BLKDEV)))
-               return;
-
-       /*
-        * The swap subsystem performs lazy swap slot freeing,
-        * expecting that the page will be swapped out again.
-        * So we can avoid an unnecessary write if the page
-        * isn't redirtied.
-        * This is good for real swap storage because we can
-        * reduce unnecessary I/O and enhance wear-leveling
-        * if an SSD is used as the as swap device.
-        * But if in-memory swap device (eg zram) is used,
-        * this causes a duplicated copy between uncompressed
-        * data in VM-owned memory and compressed data in
-        * zram-owned memory.  So let's free zram-owned memory
-        * and make the VM-owned decompressed page *dirty*,
-        * so the page should be swapped out somewhere again if
-        * we again wish to reclaim it.
-        */
-       disk = sis->bdev->bd_disk;
-       entry.val = page_private(page);
-       if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
-               unsigned long offset;
-
-               offset = swp_offset(entry);
-
-               SetPageDirty(page);
-               disk->fops->swap_slot_free_notify(sis->bdev,
-                               offset);
-       }
-}
-
 static void end_swap_bio_read(struct bio *bio)
 {
        struct page *page = bio_first_page_all(bio);
@@ -114,7 +66,6 @@ static void end_swap_bio_read(struct bio *bio)
        }
 
        SetPageUptodate(page);
-       swap_slot_free_notify(page);
 out:
        unlock_page(page);
        WRITE_ONCE(bio->bi_private, NULL);
@@ -394,11 +345,6 @@ int swap_readpage(struct page *page, bool synchronous)
        if (sis->flags & SWP_SYNCHRONOUS_IO) {
                ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
                if (!ret) {
-                       if (trylock_page(page)) {
-                               swap_slot_free_notify(page);
-                               unlock_page(page);
-                       }
-
                        count_vm_event(PSWPIN);
                        goto out;
                }
index 1187f9c1ec5b1048a2c6a8cbd2a0e6341f19d400..14a5cda73dee62759d5c3dc52a26f13f6e624e43 100644 (file)
@@ -163,7 +163,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
                return not_found(pvmw);
 
        if (unlikely(is_vm_hugetlb_page(vma))) {
-               unsigned long size = pvmw->nr_pages * PAGE_SIZE;
+               struct hstate *hstate = hstate_vma(vma);
+               unsigned long size = huge_page_size(hstate);
                /* The only possible mapping was handled on last iteration */
                if (pvmw->pte)
                        return not_found(pvmw);
@@ -173,8 +174,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
                if (!pvmw->pte)
                        return false;
 
-               pvmw->ptl = huge_pte_lockptr(size_to_hstate(size), mm,
-                                               pvmw->pte);
+               pvmw->ptl = huge_pte_lockptr(hstate, mm, pvmw->pte);
                spin_lock(pvmw->ptl);
                if (!check_pte(pvmw))
                        return not_found(pvmw);
index d3a47546d17d9ed618a452ad11966875d2ccd3aa..8e3775829513ee7caec5b45e0c51df8ea517e5f9 100644 (file)
  *
  * Readahead is used to read content into the page cache before it is
  * explicitly requested by the application.  Readahead only ever
- * attempts to read pages that are not yet in the page cache.  If a
- * page is present but not up-to-date, readahead will not try to read
+ * attempts to read folios that are not yet in the page cache.  If a
+ * folio is present but not up-to-date, readahead will not try to read
  * it. In that case a simple ->readpage() will be requested.
  *
  * Readahead is triggered when an application read request (whether a
- * systemcall or a page fault) finds that the requested page is not in
+ * system call or a page fault) finds that the requested folio is not in
  * the page cache, or that it is in the page cache and has the
- * %PG_readahead flag set.  This flag indicates that the page was loaded
- * as part of a previous read-ahead request and now that it has been
- * accessed, it is time for the next read-ahead.
+ * readahead flag set.  This flag indicates that the folio was read
+ * as part of a previous readahead request and now that it has been
+ * accessed, it is time for the next readahead.
  *
  * Each readahead request is partly synchronous read, and partly async
- * read-ahead.  This is reflected in the struct file_ra_state which
- * contains ->size being to total number of pages, and ->async_size
- * which is the number of pages in the async section.  The first page in
- * this async section will have %PG_readahead set as a trigger for a
- * subsequent read ahead.  Once a series of sequential reads has been
+ * readahead.  This is reflected in the struct file_ra_state, where
+ * ->size is the total number of pages and ->async_size is the number
+ * of pages in the async section.  The readahead
+ * flag will be set on the first folio in this async section to trigger
+ * a subsequent readahead.  Once a series of sequential reads has been
  * established, there should be no need for a synchronous component and
- * all read ahead request will be fully asynchronous.
+ * all readahead requests will be fully asynchronous.
  *
- * When either of the triggers causes a readahead, three numbers need to
- * be determined: the start of the region, the size of the region, and
- * the size of the async tail.
+ * When either of the triggers causes a readahead, three numbers need
+ * to be determined: the start of the region to read, the size of the
+ * region, and the size of the async tail.
  *
  * The start of the region is simply the first page address at or after
  * the accessed address, which is not currently populated in the page
  * was explicitly requested from the determined request size, unless
  * this would be less than zero - then zero is used.  NOTE THIS
  * CALCULATION IS WRONG WHEN THE START OF THE REGION IS NOT THE ACCESSED
- * PAGE.
+ * PAGE.  ALSO THIS CALCULATION IS NOT USED CONSISTENTLY.
  *
  * The size of the region is normally determined from the size of the
  * previous readahead which loaded the preceding pages.  This may be
  * discovered from the struct file_ra_state for simple sequential reads,
  * or from examining the state of the page cache when multiple
  * sequential reads are interleaved.  Specifically: where the readahead
- * was triggered by the %PG_readahead flag, the size of the previous
+ * was triggered by the readahead flag, the size of the previous
  * readahead is assumed to be the number of pages from the triggering
  * page to the start of the new readahead.  In these cases, the size of
  * the previous readahead is scaled, often doubled, for the new
  * larger than the current request, and it is not scaled up, unless it
  * is at the start of file.
  *
- * In general read ahead is accelerated at the start of the file, as
+ * In general readahead is accelerated at the start of the file, as
  * reads from there are often sequential.  There are other minor
- * adjustments to the read ahead size in various special cases and these
+ * adjustments to the readahead size in various special cases and these
  * are best discovered by reading the code.
  *
- * The above calculation determines the readahead, to which any requested
- * read size may be added.
+ * The above calculation, based on the previous readahead size,
+ * determines the size of the readahead, to which any requested read
+ * size may be added.
  *
  * Readahead requests are sent to the filesystem using the ->readahead()
  * address space operation, for which mpage_readahead() is a canonical
  * implementation.  ->readahead() should normally initiate reads on all
- * pages, but may fail to read any or all pages without causing an IO
+ * folios, but may fail to read any or all folios without causing an I/O
  * error.  The page cache reading code will issue a ->readpage() request
- * for any page which ->readahead() does not provided, and only an error
+ * for any folio which ->readahead() did not read, and only an error
  * from this will be final.
  *
- * ->readahead() will generally call readahead_page() repeatedly to get
- * each page from those prepared for read ahead.  It may fail to read a
- * page by:
+ * ->readahead() will generally call readahead_folio() repeatedly to get
+ * each folio from those prepared for readahead.  It may fail to read a
+ * folio by:
  *
- * * not calling readahead_page() sufficiently many times, effectively
- *   ignoring some pages, as might be appropriate if the path to
+ * * not calling readahead_folio() sufficiently many times, effectively
+ *   ignoring some folios, as might be appropriate if the path to
  *   storage is congested.
  *
- * * failing to actually submit a read request for a given page,
+ * * failing to actually submit a read request for a given folio,
  *   possibly due to insufficient resources, or
  *
  * * getting an error during subsequent processing of a request.
  *
- * In the last two cases, the page should be unlocked to indicate that
- * the read attempt has failed.  In the first case the page will be
- * unlocked by the caller.
+ * In the last two cases, the folio should be unlocked by the filesystem
+ * to indicate that the read attempt has failed.  In the first case the
+ * folio will be unlocked by the VFS.
  *
- * Those pages not in the final ``async_size`` of the request should be
+ * Those folios not in the final ``async_size`` of the request should be
  * considered to be important and ->readahead() should not fail them due
  * to congestion or temporary resource unavailability, but should wait
  * for necessary resources (e.g.  memory or indexing information) to
- * become available.  Pages in the final ``async_size`` may be
+ * become available.  Folios in the final ``async_size`` may be
  * considered less urgent and failure to read them is more acceptable.
- * In this case it is best to use delete_from_page_cache() to remove the
- * pages from the page cache as is automatically done for pages that
- * were not fetched with readahead_page().  This will allow a
- * subsequent synchronous read ahead request to try them again.  If they
+ * In this case it is best to use filemap_remove_folio() to remove the
+ * folios from the page cache as is automatically done for folios that
+ * were not fetched with readahead_folio().  This will allow a
+ * subsequent synchronous readahead request to try them again.  If they
  * are left in the page cache, then they will be read individually using
- * ->readpage().
- *
+ * ->readpage() which may be less efficient.
  */
 
 #include <linux/kernel.h>
@@ -142,91 +142,14 @@ file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 }
 EXPORT_SYMBOL_GPL(file_ra_state_init);
 
-/*
- * see if a page needs releasing upon read_cache_pages() failure
- * - the caller of read_cache_pages() may have set PG_private or PG_fscache
- *   before calling, such as the NFS fs marking pages that are cached locally
- *   on disk, thus we need to give the fs a chance to clean up in the event of
- *   an error
- */
-static void read_cache_pages_invalidate_page(struct address_space *mapping,
-                                            struct page *page)
-{
-       if (page_has_private(page)) {
-               if (!trylock_page(page))
-                       BUG();
-               page->mapping = mapping;
-               folio_invalidate(page_folio(page), 0, PAGE_SIZE);
-               page->mapping = NULL;
-               unlock_page(page);
-       }
-       put_page(page);
-}
-
-/*
- * release a list of pages, invalidating them first if need be
- */
-static void read_cache_pages_invalidate_pages(struct address_space *mapping,
-                                             struct list_head *pages)
-{
-       struct page *victim;
-
-       while (!list_empty(pages)) {
-               victim = lru_to_page(pages);
-               list_del(&victim->lru);
-               read_cache_pages_invalidate_page(mapping, victim);
-       }
-}
-
-/**
- * read_cache_pages - populate an address space with some pages & start reads against them
- * @mapping: the address_space
- * @pages: The address of a list_head which contains the target pages.  These
- *   pages have their ->index populated and are otherwise uninitialised.
- * @filler: callback routine for filling a single page.
- * @data: private data for the callback routine.
- *
- * Hides the details of the LRU cache etc from the filesystems.
- *
- * Returns: %0 on success, error return by @filler otherwise
- */
-int read_cache_pages(struct address_space *mapping, struct list_head *pages,
-                       int (*filler)(void *, struct page *), void *data)
-{
-       struct page *page;
-       int ret = 0;
-
-       while (!list_empty(pages)) {
-               page = lru_to_page(pages);
-               list_del(&page->lru);
-               if (add_to_page_cache_lru(page, mapping, page->index,
-                               readahead_gfp_mask(mapping))) {
-                       read_cache_pages_invalidate_page(mapping, page);
-                       continue;
-               }
-               put_page(page);
-
-               ret = filler(data, page);
-               if (unlikely(ret)) {
-                       read_cache_pages_invalidate_pages(mapping, pages);
-                       break;
-               }
-               task_io_account_read(PAGE_SIZE);
-       }
-       return ret;
-}
-
-EXPORT_SYMBOL(read_cache_pages);
-
-static void read_pages(struct readahead_control *rac, struct list_head *pages,
-               bool skip_page)
+static void read_pages(struct readahead_control *rac)
 {
        const struct address_space_operations *aops = rac->mapping->a_ops;
        struct page *page;
        struct blk_plug plug;
 
        if (!readahead_count(rac))
-               goto out;
+               return;
 
        blk_start_plug(&plug);
 
@@ -234,7 +157,7 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
                aops->readahead(rac);
                /*
                 * Clean up the remaining pages.  The sizes in ->ra
-                * maybe be used to size next read-ahead, so make sure
+                * may be used to size the next readahead, so make sure
                 * they accurately reflect what happened.
                 */
                while ((page = readahead_page(rac))) {
@@ -246,13 +169,6 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
                        unlock_page(page);
                        put_page(page);
                }
-       } else if (aops->readpages) {
-               aops->readpages(rac->file, rac->mapping, pages,
-                               readahead_count(rac));
-               /* Clean up the remaining pages */
-               put_pages_list(pages);
-               rac->_index += rac->_nr_pages;
-               rac->_nr_pages = 0;
        } else {
                while ((page = readahead_page(rac))) {
                        aops->readpage(rac->file, page);
@@ -262,12 +178,7 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
 
        blk_finish_plug(&plug);
 
-       BUG_ON(pages && !list_empty(pages));
        BUG_ON(readahead_count(rac));
-
-out:
-       if (skip_page)
-               rac->_index++;
 }
 
 /**
@@ -289,7 +200,6 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 {
        struct address_space *mapping = ractl->mapping;
        unsigned long index = readahead_index(ractl);
-       LIST_HEAD(page_pool);
        gfp_t gfp_mask = readahead_gfp_mask(mapping);
        unsigned long i;
 
@@ -321,7 +231,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
                         * have a stable reference to this page, and it's
                         * not worth getting one just for that.
                         */
-                       read_pages(ractl, &page_pool, true);
+                       read_pages(ractl);
+                       ractl->_index++;
                        i = ractl->_index + ractl->_nr_pages - index - 1;
                        continue;
                }
@@ -329,13 +240,11 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
                folio = filemap_alloc_folio(gfp_mask, 0);
                if (!folio)
                        break;
-               if (mapping->a_ops->readpages) {
-                       folio->index = index + i;
-                       list_add(&folio->lru, &page_pool);
-               } else if (filemap_add_folio(mapping, folio, index + i,
+               if (filemap_add_folio(mapping, folio, index + i,
                                        gfp_mask) < 0) {
                        folio_put(folio);
-                       read_pages(ractl, &page_pool, true);
+                       read_pages(ractl);
+                       ractl->_index++;
                        i = ractl->_index + ractl->_nr_pages - index - 1;
                        continue;
                }
@@ -349,7 +258,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
         * uptodate then the caller will launch readpage again, and
         * will then handle the error.
         */
-       read_pages(ractl, &page_pool, false);
+       read_pages(ractl);
        filemap_invalidate_unlock_shared(mapping);
        memalloc_nofs_restore(nofs);
 }
@@ -394,8 +303,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
        unsigned long max_pages, index;
 
-       if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
-                       !mapping->a_ops->readahead))
+       if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readahead))
                return;
 
        /*
@@ -512,7 +420,7 @@ static pgoff_t count_history_pages(struct address_space *mapping,
 }
 
 /*
- * page cache context based read-ahead
+ * page cache context based readahead
  */
 static int try_context_readahead(struct address_space *mapping,
                                 struct file_ra_state *ra,
@@ -624,7 +532,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
                ra->async_size += index - limit - 1;
        }
 
-       read_pages(ractl, NULL, false);
+       read_pages(ractl);
 
        /*
         * If there were already pages in the page cache, then we may have
@@ -763,9 +671,9 @@ void page_cache_sync_ra(struct readahead_control *ractl,
        bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);
 
        /*
-        * Even if read-ahead is disabled, issue this request as read-ahead
+        * Even if readahead is disabled, issue this request as readahead
         * as we'll need it to satisfy the requested range. The forced
-        * read-ahead will do the right thing and limit the read to just the
+        * readahead will do the right thing and limit the read to just the
         * requested range, which we'll set to 1 page for this case.
         */
        if (!ractl->ra->ra_pages || blk_cgroup_congested()) {
@@ -781,7 +689,6 @@ void page_cache_sync_ra(struct readahead_control *ractl,
                return;
        }
 
-       /* do read-ahead */
        ondemand_readahead(ractl, NULL, req_count);
 }
 EXPORT_SYMBOL_GPL(page_cache_sync_ra);
@@ -789,7 +696,7 @@ EXPORT_SYMBOL_GPL(page_cache_sync_ra);
 void page_cache_async_ra(struct readahead_control *ractl,
                struct folio *folio, unsigned long req_count)
 {
-       /* no read-ahead */
+       /* no readahead */
        if (!ractl->ra->ra_pages)
                return;
 
@@ -804,7 +711,6 @@ void page_cache_async_ra(struct readahead_control *ractl,
        if (blk_cgroup_congested())
                return;
 
-       /* do read-ahead */
        ondemand_readahead(ractl, folio, req_count);
 }
 EXPORT_SYMBOL_GPL(page_cache_async_ra);
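
With ->readpages() removed, ->readahead() is the only batched read entry
point.  A minimal sketch of an implementation following the contract
described in the comment block above (myfs_* names and myfs_submit_read()
are hypothetical):

	static void myfs_readahead(struct readahead_control *rac)
	{
		struct folio *folio;

		while ((folio = readahead_folio(rac))) {
			if (myfs_submit_read(folio) < 0) {
				/* failed attempt: unlock to signal the error;
				 * folios never taken are unlocked by the VFS */
				folio_unlock(folio);
				break;
			}
			/* I/O completion marks the folio uptodate and
			 * unlocks it */
		}
	}

	static const struct address_space_operations myfs_aops = {
		.readahead	= myfs_readahead,
		.readpage	= myfs_readpage,
	};
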
index 5cb970d51f0a23fe8e99a0279b86f8ab012f8007..fedb82371efed2de7aadd5066e5fcbc4600a494b 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1683,7 +1683,7 @@ discard:
                 */
                page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                if (vma->vm_flags & VM_LOCKED)
-                       mlock_page_drain(smp_processor_id());
+                       mlock_page_drain_local();
                folio_put(folio);
        }
 
@@ -1961,7 +1961,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                 */
                page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                if (vma->vm_flags & VM_LOCKED)
-                       mlock_page_drain(smp_processor_id());
+                       mlock_page_drain_local();
                folio_put(folio);
        }
 
index 098638d3b8a41c08a6818e5bac4acccf75d80df3..3b3cf2892b6ae8a7f5da0ce28030ac45e09b51ca 100644 (file)
@@ -158,6 +158,22 @@ const struct address_space_operations secretmem_aops = {
        .isolate_page   = secretmem_isolate_page,
 };
 
+static int secretmem_setattr(struct user_namespace *mnt_userns,
+                            struct dentry *dentry, struct iattr *iattr)
+{
+       struct inode *inode = d_inode(dentry);
+       unsigned int ia_valid = iattr->ia_valid;
+
+       if ((ia_valid & ATTR_SIZE) && inode->i_size)
+               return -EINVAL;
+
+       return simple_setattr(mnt_userns, dentry, iattr);
+}
+
+static const struct inode_operations secretmem_iops = {
+       .setattr = secretmem_setattr,
+};
+
 static struct vfsmount *secretmem_mnt;
 
 static struct file *secretmem_file_create(unsigned long flags)
@@ -177,6 +193,7 @@ static struct file *secretmem_file_create(unsigned long flags)
        mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
        mapping_set_unevictable(inode->i_mapping);
 
+       inode->i_op = &secretmem_iops;
        inode->i_mapping->a_ops = &secretmem_aops;
 
        /* pretend we are a normal file with zero size */
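
With this hook in place, sizing a secretmem inode becomes a one-shot operation: the first ftruncate() on an empty inode succeeds, and any later ATTR_SIZE change returns -EINVAL. A small userspace sketch of the resulting behavior (assumes a kernel with CONFIG_SECRETMEM and libc headers that define SYS_memfd_secret; the demo is illustrative, not part of this series):

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        int fd = syscall(SYS_memfd_secret, 0);

        if (fd < 0) {
                perror("memfd_secret");
                return 1;
        }
        /* i_size is still zero here, so the first truncate is allowed */
        if (ftruncate(fd, 4096))
                perror("first ftruncate");
        /* i_size is now non-zero: a resize must fail with EINVAL */
        if (ftruncate(fd, 8192) && errno == EINVAL)
                printf("resize rejected, as the new setattr hook intends\n");
        close(fd);
        return 0;
}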
index 529c9ad3e9264340ee94f2792bb24b5c19647aba..4b2fea33158e8a33a87354ccefc574ef74c1e793 100644 (file)
@@ -2513,7 +2513,6 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                pgoff_t end_index;
                unsigned long nr, ret;
                loff_t i_size = i_size_read(inode);
-               bool got_page;
 
                end_index = i_size >> PAGE_SHIFT;
                if (index > end_index)
@@ -2570,24 +2569,34 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                         */
                        if (!offset)
                                mark_page_accessed(page);
-                       got_page = true;
+                       /*
+                        * Ok, we have the page, and it's up-to-date, so
+                        * now we can copy it to user space...
+                        */
+                       ret = copy_page_to_iter(page, offset, nr, to);
+                       put_page(page);
+
+               } else if (iter_is_iovec(to)) {
+                       /*
+                        * copy_to_user() tends to be well optimized, while
+                        * clear_user() is not, so it is noticeably faster
+                        * to copy the zero page than to clear the buffer.
+                        */
+                       ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
                } else {
-                       page = ZERO_PAGE(0);
-                       got_page = false;
+                       /*
+                        * But submitting the same page twice in a row to
+                        * splice() (or other consumers) can cause confusion,
+                        * so don't attempt that optimization on pipes etc.
+                        */
+                       ret = iov_iter_zero(nr, to);
                }
 
-               /*
-                * Ok, we have the page, and it's up-to-date, so
-                * now we can copy it to user space...
-                */
-               ret = copy_page_to_iter(page, offset, nr, to);
                retval += ret;
                offset += ret;
                index += offset >> PAGE_SHIFT;
                offset &= ~PAGE_MASK;
 
-               if (got_page)
-                       put_page(page);
                if (!iov_iter_count(to))
                        break;
                if (ret < nr) {
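
The rewrite splits the hole case in two: iovec destinations copy ZERO_PAGE(0), since a memcpy of the zero page beats clear_user(), while pipe-backed iterators use iov_iter_zero() so splice() is never handed the same page twice. A condensed sketch of that decision tree (not the literal shmem code):

static ssize_t fill_from_hole_sketch(struct page *page, size_t offset,
                                     size_t nr, struct iov_iter *to)
{
        if (page)                       /* real data present */
                return copy_page_to_iter(page, offset, nr, to);
        if (iter_is_iovec(to))          /* user buffer: zero-page copy wins */
                return copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
        return iov_iter_zero(nr, to);   /* pipes etc.: avoid page reuse */
}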
index b04e40078bdf7d63e540fe20da21049b00779d3a..0edb474edef1839662d4e1a4edeacfceea14bb41 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3665,7 +3665,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
        struct kmem_cache *cachep;
        unsigned int objnr;
index fd7ae2024897d5ccb0bdabb5f079544e3b5183f1..95eb34174c1bb538e38a9c2432273f4db2b73bb4 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -868,7 +868,7 @@ struct kmem_obj_info {
        void *kp_stack[KS_ADDRS_COUNT];
        void *kp_free_stack[KS_ADDRS_COUNT];
 };
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
 #endif
 
 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
index 6ee64d6208b395980d346d8bf4518bd77eec3f60..2b3206a2c3b51064f8813ab03412152d4f6fc043 100644 (file)
@@ -555,6 +555,13 @@ bool kmem_valid_obj(void *object)
 }
 EXPORT_SYMBOL_GPL(kmem_valid_obj);
 
+static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+       if (__kfence_obj_info(kpp, object, slab))
+               return;
+       __kmem_obj_info(kpp, object, slab);
+}
+
 /**
  * kmem_dump_obj - Print available slab provenance information
  * @object: slab object for which to find provenance information.
@@ -590,6 +597,8 @@ void kmem_dump_obj(void *object)
                pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
        else
                pr_cont(" slab%s", cp);
+       if (is_kfence_address(object))
+               pr_cont(" (kfence)");
        if (kp.kp_objp)
                pr_cont(" start %px", kp.kp_objp);
        if (kp.kp_data_offset)
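
With the dispatch wrapper above, kmem_dump_obj() transparently covers kfence-managed objects too. A typical debugging call site might look like this (sketch):

static void report_object(void *obj)
{
        if (kmem_valid_obj(obj))
                kmem_dump_obj(obj);     /* now annotates "(kfence)" objects */
        else
                pr_info("not a slab object: %px\n", obj);
}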
index dfa6808dff36f531af6140746aec5befcf1d766d..40ea6e2d4ccd3f67c1a558cbdb35475c81bcaaf3 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -463,7 +463,7 @@ out:
 }
 
 #ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
        kpp->kp_ptr = object;
        kpp->kp_slab = slab;
index 74d92aa4a3a28d744d3d5dfd4b30f5d632172207..ed5c2c03a47aaeac43a2c5534f34cdf7da9fdc38 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4312,7 +4312,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
 }
 
 #ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
        void *base;
        int __maybe_unused i;
index bceff0cb559c979d4a50f442c5b1a2d1cd6075c7..7e320ec08c6ae2d938cd02e894b674decf913160 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -624,7 +624,6 @@ void lru_add_drain_cpu(int cpu)
                pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
 
        activate_page_drain(cpu);
-       mlock_page_drain(cpu);
 }
 
 /**
@@ -706,6 +705,7 @@ void lru_add_drain(void)
        local_lock(&lru_pvecs.lock);
        lru_add_drain_cpu(smp_processor_id());
        local_unlock(&lru_pvecs.lock);
+       mlock_page_drain_local();
 }
 
 /*
@@ -720,6 +720,7 @@ static void lru_add_and_bh_lrus_drain(void)
        lru_add_drain_cpu(smp_processor_id());
        local_unlock(&lru_pvecs.lock);
        invalidate_bh_lrus_cpu();
+       mlock_page_drain_local();
 }
 
 void lru_add_drain_cpu_zone(struct zone *zone)
@@ -728,6 +729,7 @@ void lru_add_drain_cpu_zone(struct zone *zone)
        lru_add_drain_cpu(smp_processor_id());
        drain_local_pages(zone);
        local_unlock(&lru_pvecs.lock);
+       mlock_page_drain_local();
 }
 
 #ifdef CONFIG_SMP
index e163372d396798fdedb45e87964531f1599bb183..07da85ae825b6455bc128d948092733d5043618c 100644 (file)
@@ -1671,17 +1671,6 @@ static DEFINE_MUTEX(vmap_purge_lock);
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
 
-#ifdef CONFIG_X86_64
-/*
- * called before a call to iounmap() if the caller wants vm_area_struct's
- * immediately freed.
- */
-void set_iounmap_nonlazy(void)
-{
-       atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
-}
-#endif /* CONFIG_X86_64 */
-
 /*
  * Purges all lazily-freed vmap areas.
  */
@@ -3106,7 +3095,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
                return NULL;
        }
 
-       if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
+       if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
                unsigned long size_per_node;
 
                /*
@@ -3273,21 +3262,24 @@ void *vmalloc(unsigned long size)
 EXPORT_SYMBOL(vmalloc);
 
 /**
- * vmalloc_no_huge - allocate virtually contiguous memory using small pages
- * @size:    allocation size
+ * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
+ * @size:      allocation size
+ * @gfp_mask:  flags for the page level allocator
  *
- * Allocate enough non-huge pages to cover @size from the page level
+ * Allocate enough pages to cover @size from the page level
  * allocator and map them into contiguous kernel virtual space.
+ * If @size is greater than or equal to PMD_SIZE, allow using
+ * huge pages for the memory.
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vmalloc_no_huge(unsigned long size)
+void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
 {
        return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
-                                   GFP_KERNEL, PAGE_KERNEL, VM_NO_HUGE_VMAP,
+                                   gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
                                    NUMA_NO_NODE, __builtin_return_address(0));
 }
-EXPORT_SYMBOL(vmalloc_no_huge);
+EXPORT_SYMBOL_GPL(vmalloc_huge);
 
 /**
  * vzalloc - allocate virtually contiguous memory with zero fill
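
Flipping the polarity from opt-out (VM_NO_HUGE_VMAP) to opt-in (VM_ALLOW_HUGE_VMAP) means only callers that explicitly use vmalloc_huge() can receive PMD-backed mappings. A hedged usage sketch, with a made-up caller (table size and GFP flags are illustrative):

#include <linux/vmalloc.h>

static void *alloc_big_table(unsigned long nentries)
{
        /*
         * May be backed by PMD-sized pages once the size reaches
         * PMD_SIZE; smaller requests fall back to base pages.
         */
        return vmalloc_huge(nentries * sizeof(void *), GFP_KERNEL);
}

static void free_big_table(void *table)
{
        vfree(table);   /* vfree() copes with huge-mapped areas too */
}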
index 8cc44c36723171e77f2695c22d9bfd70bcdfb6cc..18affda2b522ac952effa064ac87adf1dee6c3b2 100644 (file)
@@ -353,6 +353,8 @@ static int br_switchdev_vlan_attr_replay(struct net_device *br_dev,
        attr.orig_dev = br_dev;
 
        vg = br_vlan_group(br);
+       if (!vg)
+               return 0;
 
        list_for_each_entry(v, &vg->vlan_list, vlist) {
                if (v->msti) {
index bafb0fb5f0e0ece5f8130676096381fdafc76792..ff5d7870294e89a673f85b489e963153045351bd 100644 (file)
@@ -906,6 +906,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        struct canfd_frame *cf;
        int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
        int wait_tx_done = (so->opt.flags & CAN_ISOTP_WAIT_TX_DONE) ? 1 : 0;
+       s64 hrtimer_sec = 0;
        int off;
        int err;
 
@@ -1004,7 +1005,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
                isotp_create_fframe(cf, so, ae);
 
                /* start timeout for FC */
-               hrtimer_start(&so->txtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
+               hrtimer_sec = 1;
+               hrtimer_start(&so->txtimer, ktime_set(hrtimer_sec, 0),
+                             HRTIMER_MODE_REL_SOFT);
        }
 
        /* send the first or only CAN frame */
@@ -1017,6 +1020,11 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        if (err) {
                pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
                               __func__, ERR_PTR(err));
+
+               /* no transmission -> no timeout monitoring */
+               if (hrtimer_sec)
+                       hrtimer_cancel(&so->txtimer);
+
                goto err_out_drop;
        }
 
index 8c6c08446556a23baae724d263bb9bd63eb39c7e..1461c2d9dec8099a9a2d43a704b4c6cb0375f480 100644 (file)
@@ -10304,7 +10304,7 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 }
 EXPORT_SYMBOL(netdev_stats_to_stats64);
 
-struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev)
+struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev)
 {
        struct net_device_core_stats __percpu *p;
 
@@ -10315,11 +10315,7 @@ struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev)
                free_percpu(p);
 
        /* This READ_ONCE() pairs with the cmpxchg() above */
-       p = READ_ONCE(dev->core_stats);
-       if (!p)
-               return NULL;
-
-       return this_cpu_ptr(p);
+       return READ_ONCE(dev->core_stats);
 }
 EXPORT_SYMBOL(netdev_core_stats_alloc);
 
@@ -10356,9 +10352,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 
                for_each_possible_cpu(i) {
                        core_stats = per_cpu_ptr(p, i);
-                       storage->rx_dropped += local_read(&core_stats->rx_dropped);
-                       storage->tx_dropped += local_read(&core_stats->tx_dropped);
-                       storage->rx_nohandler += local_read(&core_stats->rx_nohandler);
+                       storage->rx_dropped += READ_ONCE(core_stats->rx_dropped);
+                       storage->tx_dropped += READ_ONCE(core_stats->tx_dropped);
+                       storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler);
                }
        }
        return storage;
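
Returning the __percpu cookie instead of a this_cpu_ptr() result closes the window where per-CPU data was dereferenced before the caller pinned a CPU. The intended caller pattern then looks roughly as follows (rx_dropped is a real field of struct net_device_core_stats; the wrapper itself is a sketch):

static inline void dev_core_stats_rx_dropped_inc_sketch(struct net_device *dev)
{
        struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);

        if (!p)
                p = netdev_core_stats_alloc(dev);
        if (p)
                this_cpu_inc(p->rx_dropped);    /* CPU slot resolved at use site */
}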
index 03b6e649c4288a4a8014cf6c200bd0bec0819b7b..6f7ec72016dcd5e0caa90ea631d06317e667d8d7 100644 (file)
@@ -1032,7 +1032,7 @@ bool __skb_flow_dissect(const struct net *net,
                key_eth_addrs = skb_flow_dissector_target(flow_dissector,
                                                          FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                          target_container);
-               memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
+               memcpy(key_eth_addrs, eth, sizeof(*key_eth_addrs));
        }
 
 proto_again:
@@ -1183,6 +1183,7 @@ proto_again:
                                         VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                        }
                        key_vlan->vlan_tpid = saved_vlan_tpid;
+                       key_vlan->vlan_eth_type = proto;
                }
 
                fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
index 159c9c61e6af353cadbdbb03281d2271d36f7e71..d1381ea6d52e092ca3684f122118d6db9ce87cee 100644 (file)
@@ -5242,6 +5242,8 @@ static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
                *prividx = attr_id_l3_stats;
 
                size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
+               if (!size_l3)
+                       goto skip_l3_stats;
                attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
                                         IFLA_OFFLOAD_XSTATS_UNSPEC);
                if (!attr)
@@ -5253,6 +5255,7 @@ static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
                        return err;
 
                have_data = true;
+skip_l3_stats:
                *prividx = 0;
        }
 
index ca6af86964bcef49192a139d2722ea6f19174a27..cf933225df32492c084567215a2333ed55ada802 100644 (file)
@@ -562,7 +562,6 @@ static void dsa_port_teardown(struct dsa_port *dp)
 {
        struct devlink_port *dlp = &dp->devlink_port;
        struct dsa_switch *ds = dp->ds;
-       struct net_device *slave;
 
        if (!dp->setup)
                return;
@@ -584,11 +583,9 @@ static void dsa_port_teardown(struct dsa_port *dp)
                dsa_port_link_unregister_of(dp);
                break;
        case DSA_PORT_TYPE_USER:
-               slave = dp->slave;
-
-               if (slave) {
+               if (dp->slave) {
+                       dsa_slave_destroy(dp->slave);
                        dp->slave = NULL;
-                       dsa_slave_destroy(slave);
                }
                break;
        }
@@ -1147,17 +1144,17 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
        if (err)
                goto teardown_cpu_ports;
 
-       err = dsa_tree_setup_master(dst);
+       err = dsa_tree_setup_ports(dst);
        if (err)
                goto teardown_switches;
 
-       err = dsa_tree_setup_ports(dst);
+       err = dsa_tree_setup_master(dst);
        if (err)
-               goto teardown_master;
+               goto teardown_ports;
 
        err = dsa_tree_setup_lags(dst);
        if (err)
-               goto teardown_ports;
+               goto teardown_master;
 
        dst->setup = true;
 
@@ -1165,10 +1162,10 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
 
        return 0;
 
-teardown_ports:
-       dsa_tree_teardown_ports(dst);
 teardown_master:
        dsa_tree_teardown_master(dst);
+teardown_ports:
+       dsa_tree_teardown_ports(dst);
 teardown_switches:
        dsa_tree_teardown_switches(dst);
 teardown_cpu_ports:
@@ -1186,10 +1183,10 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
 
        dsa_tree_teardown_lags(dst);
 
-       dsa_tree_teardown_ports(dst);
-
        dsa_tree_teardown_master(dst);
 
+       dsa_tree_teardown_ports(dst);
+
        dsa_tree_teardown_switches(dst);
 
        dsa_tree_teardown_cpu_ports(dst);
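
The reordering restores the usual convention that teardown mirrors setup exactly and each error label unwinds only what already succeeded. The generic shape, with illustrative names:

static int tree_setup_sketch(void)
{
        int err;

        err = setup_switches();
        if (err)
                return err;
        err = setup_ports();
        if (err)
                goto err_switches;
        err = setup_master();
        if (err)
                goto err_ports;         /* unwind in reverse order */
        return 0;

err_ports:
        teardown_ports();
err_switches:
        teardown_switches();
        return err;
}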
index 32d472a8224194b1049205919676980d64970088..cdc56ba11f52b2eaa813d61addde078831813733 100644 (file)
@@ -1620,8 +1620,10 @@ int dsa_port_link_register_of(struct dsa_port *dp)
                        if (ds->ops->phylink_mac_link_down)
                                ds->ops->phylink_mac_link_down(ds, port,
                                        MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
+                       of_node_put(phy_np);
                        return dsa_port_phylink_register(dp);
                }
+               of_node_put(phy_np);
                return 0;
        }
 
index 41c69a6e7854ac2fef309010d57ff99aa8c9cfbd..8022d50584db72618647a15a305560126a25cf07 100644 (file)
@@ -285,7 +285,7 @@ static void dsa_port_manage_cpu_flood(struct dsa_port *dp)
                if (other_dp->slave->flags & IFF_ALLMULTI)
                        flags.val |= BR_MCAST_FLOOD;
                if (other_dp->slave->flags & IFF_PROMISC)
-                       flags.val |= BR_FLOOD;
+                       flags.val |= BR_FLOOD | BR_MCAST_FLOOD;
        }
 
        err = dsa_port_pre_bridge_flags(dp, flags, NULL);
index f64b805303cd798dd48c92f68c960e19bcbe25a2..eb204ad36eeec083f9f4bf8f9ff89baf06eea72f 100644 (file)
@@ -21,6 +21,14 @@ static struct sk_buff *hellcreek_xmit(struct sk_buff *skb,
        struct dsa_port *dp = dsa_slave_to_port(dev);
        u8 *tag;
 
+       /* Calculate checksums (if required) before adding the trailer tag to
+        * avoid including it in calculations. That would lead to wrong
+        * checksums after the switch strips the tag.
+        */
+       if (skb->ip_summed == CHECKSUM_PARTIAL &&
+           skb_checksum_help(skb))
+               return NULL;
+
        /* Tag encoding */
        tag  = skb_put(skb, HELLCREEK_TAG_LEN);
        *tag = BIT(dp->index);
index 70e6c87fbe3df15c5ca6adc68b685e3a2afa21a4..d747166bb291ccf95ae8ac8d6863177cda6649e3 100644 (file)
@@ -446,7 +446,6 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
        struct page *page;
        struct sk_buff *trailer;
        int tailen = esp->tailen;
-       unsigned int allocsz;
 
        /* this is non-NULL only with TCP/UDP Encapsulation */
        if (x->encap) {
@@ -456,8 +455,8 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
                        return err;
        }
 
-       allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
-       if (allocsz > ESP_SKB_FRAG_MAXSIZE)
+       if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
+           ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
                goto cow;
 
        if (!skb_cloned(skb)) {
index 99db2e41ed10f1fd6472b16278307ee97eee0c57..aacee9dd771b4ae1317b53b9128d1051f7450fc8 100644 (file)
@@ -459,14 +459,12 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
                       __be16 proto)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
-
-       if (tunnel->parms.o_flags & TUNNEL_SEQ)
-               tunnel->o_seqno++;
+       __be16 flags = tunnel->parms.o_flags;
 
        /* Push GRE header. */
        gre_build_header(skb, tunnel->tun_hlen,
-                        tunnel->parms.o_flags, proto, tunnel->parms.o_key,
-                        htonl(tunnel->o_seqno));
+                        flags, proto, tunnel->parms.o_key,
+                        (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
 
        ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 }
@@ -504,7 +502,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
                (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
        gre_build_header(skb, tunnel_hlen, flags, proto,
                         tunnel_id_to_key32(tun_info->key.tun_id),
-                        (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
+                        (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
 
        ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
 
@@ -581,7 +579,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        gre_build_header(skb, 8, TUNNEL_SEQ,
-                        proto, 0, htonl(tunnel->o_seqno++));
+                        proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno)));
 
        ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
 
@@ -605,8 +603,8 @@ static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
        key = &info->key;
        ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
                            tunnel_id_to_key32(key->tun_id),
-                           key->tos & ~INET_ECN_MASK, 0, skb->mark,
-                           skb_get_hash(skb));
+                           key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
+                           skb->mark, skb_get_hash(skb));
        rt = ip_route_output_key(dev_net(dev), &fl4);
        if (IS_ERR(rt))
                return PTR_ERR(rt);
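
The seqno hunks above all address the same race: tunnel->o_seqno++ can be executed by several CPUs transmitting on one tunnel, so the counter becomes an atomic_t and the increment folds into header construction. A minimal sketch of the fixed pattern (the helper name is made up):

static __be32 gre_next_seqno(struct ip_tunnel *tunnel, __be16 flags)
{
        if (!(flags & TUNNEL_SEQ))
                return 0;
        /* fetch-and-increment keeps concurrent senders from reusing a seqno */
        return htonl(atomic_fetch_inc(&tunnel->o_seqno));
}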
index 5a473319d3a5c8924d1101f95ea83244f5dbb833..94017a8c39945612443ba32a936f4e85eb6533ad 100644 (file)
@@ -294,8 +294,8 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
 
                ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
                                    iph->saddr, tunnel->parms.o_key,
-                                   RT_TOS(iph->tos), tunnel->parms.link,
-                                   tunnel->fwmark, 0);
+                                   RT_TOS(iph->tos), dev_net(dev),
+                                   tunnel->parms.link, tunnel->fwmark, 0);
                rt = ip_route_output_key(tunnel->net, &fl4);
 
                if (!IS_ERR(rt)) {
@@ -570,7 +570,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        }
        ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
                            tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
-                           0, skb->mark, skb_get_hash(skb));
+                           dev_net(dev), 0, skb->mark, skb_get_hash(skb));
        if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
                goto tx_error;
 
@@ -726,7 +726,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        }
 
        ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
-                           tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
+                           tunnel->parms.o_key, RT_TOS(tos),
+                           dev_net(dev), tunnel->parms.link,
                            tunnel->fwmark, skb_get_hash(skb));
 
        if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c
deleted file mode 100644 (file)
index e69de29..0000000
index 2cb3b852d14861231ac47f0b3e4daeb57682ffd2..f33c31dd7366c06a642bdc5954856efa9d8da0ac 100644 (file)
@@ -281,6 +281,7 @@ bool cookie_ecn_ok(const struct tcp_options_received *tcp_opt,
 EXPORT_SYMBOL(cookie_ecn_ok);
 
 struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
+                                           const struct tcp_request_sock_ops *af_ops,
                                            struct sock *sk,
                                            struct sk_buff *skb)
 {
@@ -297,6 +298,10 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
                return NULL;
 
        treq = tcp_rsk(req);
+
+       /* treq->af_specific might be used to perform TCP_MD5 lookup */
+       treq->af_specific = af_ops;
+
        treq->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
 #if IS_ENABLED(CONFIG_MPTCP)
        treq->is_mptcp = sk_is_mptcp(sk);
@@ -364,7 +369,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
                goto out;
 
        ret = NULL;
-       req = cookie_tcp_reqsk_alloc(&tcp_request_sock_ops, sk, skb);
+       req = cookie_tcp_reqsk_alloc(&tcp_request_sock_ops,
+                                    &tcp_request_sock_ipv4_ops, sk, skb);
        if (!req)
                goto out;
 
index 2088f93fa37b5fb9110e7933242a27bd4009990e..48f6075228600896daa6507c4cd06acfc851a0fa 100644 (file)
@@ -5454,7 +5454,17 @@ static void tcp_new_space(struct sock *sk)
        INDIRECT_CALL_1(sk->sk_write_space, sk_stream_write_space, sk);
 }
 
-static void tcp_check_space(struct sock *sk)
+/* Caller made space either from:
+ * 1) Freeing skbs in rtx queues (after tp->snd_una has advanced)
+ * 2) Sent skbs from output queue (and thus advancing tp->snd_nxt)
+ *
+ * We might be able to generate EPOLLOUT to the application if:
+ * 1) Space consumed in output/rtx queues is below sk->sk_sndbuf/2
+ * 2) notsent amount (tp->write_seq - tp->snd_nxt) became
+ *    small enough that tcp_stream_memory_free() decides it
+ *    is time to generate EPOLLOUT.
+ */
+void tcp_check_space(struct sock *sk)
 {
        /* pairs with tcp_poll() */
        smp_mb();
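
For reference, the "notsent amount" named in the comment above is just the bytes queued by the application but not yet handed to the network. In the same terms (the helper name is made up):

static u32 tcp_notsent_bytes_sketch(const struct tcp_sock *tp)
{
        /* write_seq advances on sendmsg(), snd_nxt on actual transmit */
        return READ_ONCE(tp->write_seq) - READ_ONCE(tp->snd_nxt);
}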
index 6366df7aaf2a6d655162a73ae1b19205a5ca0a23..6854bb1fb32b265ef4c0838267bf272c57f7601e 100644 (file)
@@ -531,7 +531,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
        newtp->tsoffset = treq->ts_off;
 #ifdef CONFIG_TCP_MD5SIG
        newtp->md5sig_info = NULL;      /*XXX*/
-       if (newtp->af_specific->md5_lookup(sk, newsk))
+       if (treq->af_specific->req_md5_lookup(sk, req_to_sk(req)))
                newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
 #endif
        if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
index 9ede847f4199844c5884e3f62ea450562072a0a7..1ca2f28c9981018e6cfaee3435d711467af6048d 100644 (file)
@@ -82,6 +82,7 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
 
        NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
                      tcp_skb_pcount(skb));
+       tcp_check_space(sk);
 }
 
 /* SND.NXT, if window was not shrunk or the amount of shrunk was less than one
index fbab921670cc91a121f2ab8cd7aa6ecfd3748535..9a8e014d9b5b99e3a8b970a2c6b1e545f542bee5 100644 (file)
@@ -74,27 +74,32 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
  *
  * If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
  * called multiple times. We favor the information from the most recently
- * sent skb, i.e., the skb with the highest prior_delivered count.
+ * sent skb, i.e., the skb with the most recently sent time and the highest
+ * sequence.
  */
 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
                            struct rate_sample *rs)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
+       u64 tx_tstamp;
 
        if (!scb->tx.delivered_mstamp)
                return;
 
+       tx_tstamp = tcp_skb_timestamp_us(skb);
        if (!rs->prior_delivered ||
-           after(scb->tx.delivered, rs->prior_delivered)) {
+           tcp_skb_sent_after(tx_tstamp, tp->first_tx_mstamp,
+                              scb->end_seq, rs->last_end_seq)) {
                rs->prior_delivered_ce  = scb->tx.delivered_ce;
                rs->prior_delivered  = scb->tx.delivered;
                rs->prior_mstamp     = scb->tx.delivered_mstamp;
                rs->is_app_limited   = scb->tx.is_app_limited;
                rs->is_retrans       = scb->sacked & TCPCB_RETRANS;
+               rs->last_end_seq     = scb->end_seq;
 
                /* Record send time of most recently ACKed packet: */
-               tp->first_tx_mstamp  = tcp_skb_timestamp_us(skb);
+               tp->first_tx_mstamp  = tx_tstamp;
                /* Find the duration of the "send phase" of this window: */
                rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
                                                     scb->tx.first_tx_mstamp);
index 55d604c9b3b3ea9898fa660cd46da14cb849d5f8..f2120e92caf15d43252102fba31c7c0153c3a446 100644 (file)
@@ -482,7 +482,6 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
        struct page *page;
        struct sk_buff *trailer;
        int tailen = esp->tailen;
-       unsigned int allocsz;
 
        if (x->encap) {
                int err = esp6_output_encap(x, skb, esp);
@@ -491,8 +490,8 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
                        return err;
        }
 
-       allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
-       if (allocsz > ESP_SKB_FRAG_MAXSIZE)
+       if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
+           ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
                goto cow;
 
        if (!skb_cloned(skb)) {
index 8753e9cec326433a43f1ca357a45384e3ad00c52..5136959b3dc5d64b3bbf2005f52ea1c67d342877 100644 (file)
@@ -724,6 +724,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
 {
        struct ip6_tnl *tunnel = netdev_priv(dev);
        __be16 protocol;
+       __be16 flags;
 
        if (dev->type == ARPHRD_ETHER)
                IPCB(skb)->flags = 0;
@@ -733,16 +734,13 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
        else
                fl6->daddr = tunnel->parms.raddr;
 
-       if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
-               return -ENOMEM;
-
        /* Push GRE header. */
        protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
 
        if (tunnel->parms.collect_md) {
                struct ip_tunnel_info *tun_info;
                const struct ip_tunnel_key *key;
-               __be16 flags;
+               int tun_hlen;
 
                tun_info = skb_tunnel_info_txcheck(skb);
                if (IS_ERR(tun_info) ||
@@ -760,21 +758,27 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
                dsfield = key->tos;
                flags = key->tun_flags &
                        (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
-               tunnel->tun_hlen = gre_calc_hlen(flags);
+               tun_hlen = gre_calc_hlen(flags);
 
-               gre_build_header(skb, tunnel->tun_hlen,
+               if (skb_cow_head(skb, dev->needed_headroom ?: tun_hlen + tunnel->encap_hlen))
+                       return -ENOMEM;
+
+               gre_build_header(skb, tun_hlen,
                                 flags, protocol,
                                 tunnel_id_to_key32(tun_info->key.tun_id),
-                                (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
+                                (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
                                                      : 0);
 
        } else {
-               if (tunnel->parms.o_flags & TUNNEL_SEQ)
-                       tunnel->o_seqno++;
+               if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
+                       return -ENOMEM;
+
+               flags = tunnel->parms.o_flags;
 
-               gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
+               gre_build_header(skb, tunnel->tun_hlen, flags,
                                 protocol, tunnel->parms.o_key,
-                                htonl(tunnel->o_seqno));
+                                (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
+                                                     : 0);
        }
 
        return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
@@ -1052,7 +1056,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
        /* Push GRE header. */
        proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
                                           : htons(ETH_P_ERSPAN2);
-       gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
+       gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(atomic_fetch_inc(&t->o_seqno)));
 
        /* TooBig packet may have updated dst->dev's mtu */
        if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
index e23f058166af582af3adf9ce1267286cf334ff7b..fa63ef2bd99cca97aff455d4b39d267f69365b35 100644 (file)
@@ -485,7 +485,7 @@ int ip6_forward(struct sk_buff *skb)
                goto drop;
 
        if (!net->ipv6.devconf_all->disable_policy &&
-           !idev->cnf.disable_policy &&
+           (!idev || !idev->cnf.disable_policy) &&
            !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
                __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
                goto drop;
index 1da332450d98eb7f6e6ca37c260c6562f72c2cbe..8ce60ab89015df2f85d3b09d643acfd43ef628b4 100644 (file)
@@ -24,14 +24,13 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff
 {
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        struct sock *sk = sk_to_full_sk(sk_partial);
+       struct net_device *dev = skb_dst(skb)->dev;
        struct flow_keys flkeys;
        unsigned int hh_len;
        struct dst_entry *dst;
        int strict = (ipv6_addr_type(&iph->daddr) &
                      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
        struct flowi6 fl6 = {
-               .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
-                       strict ? skb_dst(skb)->dev->ifindex : 0,
                .flowi6_mark = skb->mark,
                .flowi6_uid = sock_net_uid(net, sk),
                .daddr = iph->daddr,
@@ -39,6 +38,13 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff
        };
        int err;
 
+       if (sk && sk->sk_bound_dev_if)
+               fl6.flowi6_oif = sk->sk_bound_dev_if;
+       else if (strict)
+               fl6.flowi6_oif = dev->ifindex;
+       else
+               fl6.flowi6_oif = l3mdev_master_ifindex(dev);
+
        fib6_rules_early_flow_dissect(net, skb, &fl6, &flkeys);
        dst = ip6_route_output(net, sk, &fl6);
        err = dst->error;
index 169e9df6d172ead607cc20a108c3371a20dbc632..c4b6ce017d5e3bf63c66a53df3d46c08370aed23 100644 (file)
@@ -3292,6 +3292,7 @@ static int ip6_dst_gc(struct dst_ops *ops)
        int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
        int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
        unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
+       unsigned int val;
        int entries;
 
        entries = dst_entries_get_fast(ops);
@@ -3302,13 +3303,13 @@ static int ip6_dst_gc(struct dst_ops *ops)
            entries <= rt_max_size)
                goto out;
 
-       net->ipv6.ip6_rt_gc_expire++;
-       fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
+       fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
        entries = dst_entries_get_slow(ops);
        if (entries < ops->gc_thresh)
-               net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
+               atomic_set(&net->ipv6.ip6_rt_gc_expire, rt_gc_timeout >> 1);
 out:
-       net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
+       val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
+       atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
        return entries > rt_max_size;
 }
 
@@ -6509,7 +6510,7 @@ static int __net_init ip6_route_net_init(struct net *net)
        net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
        net->ipv6.sysctl.skip_notify_on_dev_down = 0;
 
-       net->ipv6.ip6_rt_gc_expire = 30*HZ;
+       atomic_set(&net->ipv6.ip6_rt_gc_expire, 30*HZ);
 
        ret = 0;
 out:
index d1b61d00368e1f58725e9997f74a0b144901277e..9cc123f000fbcfbeff7728bfee5339d6dd6470f9 100644 (file)
@@ -170,7 +170,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                goto out;
 
        ret = NULL;
-       req = cookie_tcp_reqsk_alloc(&tcp6_request_sock_ops, sk, skb);
+       req = cookie_tcp_reqsk_alloc(&tcp6_request_sock_ops,
+                                    &tcp_request_sock_ipv6_ops, sk, skb);
        if (!req)
                goto out;
 
index 4eb8892fb2ffebfff81919520786971276e9870f..ca10916340b098fb1f7e8c577bcca1eec4f4814d 100644 (file)
@@ -147,7 +147,7 @@ int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
 
        dev = dev_get_by_index_rcu(net, ifindex);
        while (dev && !netif_is_l3_master(dev))
-               dev = netdev_master_upper_dev_get(dev);
+               dev = netdev_master_upper_dev_get_rcu(dev);
 
        return dev ? dev->ifindex : 0;
 }
index 9479f2787ea79a1c466dddcea98de53d6b0a722e..88d9cc945a216c4f8b412b26965d37dbebd093fa 100644 (file)
@@ -441,7 +441,7 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
 #define PRINT_HT_CAP(_cond, _str) \
        do { \
        if (_cond) \
-                       p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \
+                       p += scnprintf(p, bufsz + buf - p, "\t" _str "\n"); \
        } while (0)
        char *buf, *p;
        int i;
index f49be882e98e2c814aeeb66e2537fbfff99c16ab..99a3bda8852f83a73b4fa4310a8cf85233b219d8 100644 (file)
@@ -313,6 +313,7 @@ void mctp_dev_hold(struct mctp_dev *mdev)
 void mctp_dev_put(struct mctp_dev *mdev)
 {
        if (mdev && refcount_dec_and_test(&mdev->refs)) {
+               kfree(mdev->addrs);
                dev_put(mdev->dev);
                kfree_rcu(mdev, rcu);
        }
@@ -441,7 +442,6 @@ static void mctp_unregister(struct net_device *dev)
 
        mctp_route_remove_dev(mdev);
        mctp_neigh_remove_dev(mdev);
-       kfree(mdev->addrs);
 
        mctp_dev_put(mdev);
 }
index 2c467c422dc6343a296ccfff9097069440fd66cd..fb67f1ca2495b3e5e157d72608d0b6916a49bc61 100644 (file)
@@ -1495,7 +1495,7 @@ int __init ip_vs_conn_init(void)
        pr_info("Connection hash table configured "
                "(size=%d, memory=%ldKbytes)\n",
                ip_vs_conn_tab_size,
-               (long)(ip_vs_conn_tab_size*sizeof(struct list_head))/1024);
+               (long)(ip_vs_conn_tab_size*sizeof(*ip_vs_conn_tab))/1024);
        IP_VS_DBG(0, "Each connection entry needs %zd bytes at least\n",
                  sizeof(struct ip_vs_conn));
 
index 128ee3b300d610d0993886b69f2b1971ddd4a92a..16c3a39689f475d3b5b7d9a8b838494ff04b1bd6 100644 (file)
@@ -9363,7 +9363,7 @@ int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
 }
 EXPORT_SYMBOL_GPL(nft_parse_u32_check);
 
-static unsigned int nft_parse_register(const struct nlattr *attr, u32 *preg)
+static int nft_parse_register(const struct nlattr *attr, u32 *preg)
 {
        unsigned int reg;
 
index d600a566da324ae7198635d81757ce78f7205489..7325bee7d14425232f953e630b7453cd490b5ce9 100644 (file)
@@ -349,7 +349,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                                *ext = &rbe->ext;
                                return -EEXIST;
                        } else {
-                               p = &parent->rb_left;
+                               overlap = false;
+                               if (nft_rbtree_interval_end(rbe))
+                                       p = &parent->rb_left;
+                               else
+                                       p = &parent->rb_right;
                        }
                }
 
index bd3792f080ed0673a6be9f29a0c51aeade966dfb..6d9e8e0a3a7d264a2a98e7bb3839ebf3ae0207cf 100644 (file)
@@ -37,12 +37,11 @@ static void nft_socket_wildcard(const struct nft_pktinfo *pkt,
 
 #ifdef CONFIG_SOCK_CGROUP_DATA
 static noinline bool
-nft_sock_get_eval_cgroupv2(u32 *dest, const struct nft_pktinfo *pkt, u32 level)
+nft_sock_get_eval_cgroupv2(u32 *dest, struct sock *sk, const struct nft_pktinfo *pkt, u32 level)
 {
-       struct sock *sk = skb_to_full_sk(pkt->skb);
        struct cgroup *cgrp;
 
-       if (!sk || !sk_fullsock(sk) || !net_eq(nft_net(pkt), sock_net(sk)))
+       if (!sk_fullsock(sk))
                return false;
 
        cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
@@ -109,7 +108,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
                break;
 #ifdef CONFIG_SOCK_CGROUP_DATA
        case NFT_SOCKET_CGROUPV2:
-               if (!nft_sock_get_eval_cgroupv2(dest, pkt, priv->level)) {
+               if (!nft_sock_get_eval_cgroupv2(dest, sk, pkt, priv->level)) {
                        regs->verdict.code = NFT_BREAK;
                        return;
                }
index 47a876ccd28816a6065d1c4b7de8cfc97e887a69..05a3795eac8e9a7c8343460d9a41e0755a64c36e 100644 (file)
@@ -2263,6 +2263,13 @@ static int netlink_dump(struct sock *sk)
         * single netdev. The outcome is MSG_TRUNC error.
         */
        skb_reserve(skb, skb_tailroom(skb) - alloc_size);
+
+       /* Make sure malicious BPF programs cannot read uninitialized memory
+        * from skb->head -> skb->data
+        */
+       skb_reset_network_header(skb);
+       skb_reset_mac_header(skb);
+
        netlink_skb_set_owner_r(skb, sk);
 
        if (nlk->dump_done_errno > 0) {
index d2537383a3e89d181f8281f1d9c3508da8bdc39f..6a193cce2a754e18950555ea6ce9f2853c348a83 100644 (file)
@@ -560,6 +560,10 @@ static int nci_close_device(struct nci_dev *ndev)
        mutex_lock(&ndev->req_lock);
 
        if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
+               /* Need to flush the cmd wq in case
+                * there is a queued/running cmd_work
+                */
+               flush_workqueue(ndev->cmd_wq);
                del_timer_sync(&ndev->cmd_timer);
                del_timer_sync(&ndev->data_timer);
                mutex_unlock(&ndev->req_lock);
index 7176156d38443ca2b5dd859b988d60a184512f6f..4c09cf8a0ab2dcbfe9a14cc2b41d02edad1cfef0 100644 (file)
@@ -2465,7 +2465,7 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
        new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
 
        if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
-               if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
+               if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) {
                        OVS_NLERR(log, "Flow action size exceeds max %u",
                                  MAX_ACTIONS_BUFSIZE);
                        return ERR_PTR(-EMSGSIZE);
index c39c09899fd0e1c5e7572eda554752e76c481aff..002d2b9c69dd1f95f234fe38f87dbf3a9481b0da 100644 (file)
@@ -2858,8 +2858,9 @@ tpacket_error:
 
                status = TP_STATUS_SEND_REQUEST;
                err = po->xmit(skb);
-               if (unlikely(err > 0)) {
-                       err = net_xmit_errno(err);
+               if (unlikely(err != 0)) {
+                       if (err > 0)
+                               err = net_xmit_errno(err);
                        if (err && __packet_get_status(po, ph) ==
                                   TP_STATUS_AVAILABLE) {
                                /* skb was destructed already */
@@ -3060,8 +3061,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
                skb->no_fcs = 1;
 
        err = po->xmit(skb);
-       if (err > 0 && (err = net_xmit_errno(err)) != 0)
-               goto out_unlock;
+       if (unlikely(err != 0)) {
+               if (err > 0)
+                       err = net_xmit_errno(err);
+               if (err)
+                       goto out_unlock;
+       }
 
        dev_put(dev);
 
index f15d6942da45306e4fa15399473044281dcbfed9..cc7e30733feb0d3f381269dfc4b9bcd5532b42ec 100644 (file)
@@ -113,7 +113,9 @@ static __net_exit void rxrpc_exit_net(struct net *net)
        struct rxrpc_net *rxnet = rxrpc_net(net);
 
        rxnet->live = false;
+       del_timer_sync(&rxnet->peer_keepalive_timer);
        cancel_work_sync(&rxnet->peer_keepalive_work);
+       /* Remove the timer again as the worker may have restarted it. */
        del_timer_sync(&rxnet->peer_keepalive_timer);
        rxrpc_destroy_all_calls(rxnet);
        rxrpc_destroy_all_connections(rxnet);
index 2957f8f5cea759315463d5e61fa1db745746e6f7..f0699f39afdb082067e581a5ff1ce217351c4a19 100644 (file)
@@ -1672,10 +1672,10 @@ static int tcf_chain_tp_insert(struct tcf_chain *chain,
        if (chain->flushing)
                return -EAGAIN;
 
+       RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
        if (*chain_info->pprev == chain->filter_chain)
                tcf_chain0_head_change(chain, tp);
        tcf_proto_get(tp);
-       RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
        rcu_assign_pointer(*chain_info->pprev, tp);
 
        return 0;
index c80fc49c0da1c7c39093a5b81c5027919d73fdd4..ed5e6f08e74a86af659fcd7aa64d92a23584cae1 100644 (file)
@@ -1013,6 +1013,7 @@ static int fl_set_key_mpls(struct nlattr **tb,
 static void fl_set_key_vlan(struct nlattr **tb,
                            __be16 ethertype,
                            int vlan_id_key, int vlan_prio_key,
+                           int vlan_next_eth_type_key,
                            struct flow_dissector_key_vlan *key_val,
                            struct flow_dissector_key_vlan *key_mask)
 {
@@ -1031,6 +1032,11 @@ static void fl_set_key_vlan(struct nlattr **tb,
        }
        key_val->vlan_tpid = ethertype;
        key_mask->vlan_tpid = cpu_to_be16(~0);
+       if (tb[vlan_next_eth_type_key]) {
+               key_val->vlan_eth_type =
+                       nla_get_be16(tb[vlan_next_eth_type_key]);
+               key_mask->vlan_eth_type = cpu_to_be16(~0);
+       }
 }
 
 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
@@ -1602,8 +1608,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
 
                if (eth_type_vlan(ethertype)) {
                        fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
-                                       TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
-                                       &mask->vlan);
+                                       TCA_FLOWER_KEY_VLAN_PRIO,
+                                       TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+                                       &key->vlan, &mask->vlan);
 
                        if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
                                ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
@@ -1611,6 +1618,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
                                        fl_set_key_vlan(tb, ethertype,
                                                        TCA_FLOWER_KEY_CVLAN_ID,
                                                        TCA_FLOWER_KEY_CVLAN_PRIO,
+                                                       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
                                                        &key->cvlan, &mask->cvlan);
                                        fl_set_key_val(tb, &key->basic.n_proto,
                                                       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
@@ -3002,13 +3010,13 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
                goto nla_put_failure;
 
        if (mask->basic.n_proto) {
-               if (mask->cvlan.vlan_tpid) {
+               if (mask->cvlan.vlan_eth_type) {
                        if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
                                         key->basic.n_proto))
                                goto nla_put_failure;
-               } else if (mask->vlan.vlan_tpid) {
+               } else if (mask->vlan.vlan_eth_type) {
                        if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
-                                        key->basic.n_proto))
+                                        key->vlan.vlan_eth_type))
                                goto nla_put_failure;
                }
        }
index cf5649292ee00941e5c4a4d5b11b1c3dc98cce3f..4d27300c287c46d11bf9d44f8c66eded9e734581 100644 (file)
@@ -386,14 +386,19 @@ static int u32_init(struct tcf_proto *tp)
        return 0;
 }
 
-static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
+static void __u32_destroy_key(struct tc_u_knode *n)
 {
        struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
 
        tcf_exts_destroy(&n->exts);
-       tcf_exts_put_net(&n->exts);
        if (ht && --ht->refcnt == 0)
                kfree(ht);
+       kfree(n);
+}
+
+static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
+{
+       tcf_exts_put_net(&n->exts);
 #ifdef CONFIG_CLS_U32_PERF
        if (free_pf)
                free_percpu(n->pf);
@@ -402,8 +407,7 @@ static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
        if (free_pf)
                free_percpu(n->pcpu_success);
 #endif
-       kfree(n);
-       return 0;
+       __u32_destroy_key(n);
 }
 
 /* u32_delete_key_rcu should be called when free'ing a copied
@@ -811,10 +815,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
        new->flags = n->flags;
        RCU_INIT_POINTER(new->ht_down, ht);
 
-       /* bump reference count as long as we hold pointer to structure */
-       if (ht)
-               ht->refcnt++;
-
 #ifdef CONFIG_CLS_U32_PERF
        /* Statistics may be incremented by readers during update
         * so we must keep them in tact. When the node is later destroyed
@@ -836,6 +836,10 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
                return NULL;
        }
 
+       /* bump reference count as long as we hold pointer to structure */
+       if (ht)
+               ht->refcnt++;
+
        return new;
 }
 
@@ -900,13 +904,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
                                    extack);
 
                if (err) {
-                       u32_destroy_key(new, false);
+                       __u32_destroy_key(new);
                        return err;
                }
 
                err = u32_replace_hw_knode(tp, new, flags, extack);
                if (err) {
-                       u32_destroy_key(new, false);
+                       __u32_destroy_key(new);
                        return err;
                }
 
index 377f896bdedc4575316c3bab1d661d9c88142ec0..b9c71a304d399db0a31efa62ce64622163eb23a1 100644 (file)
@@ -417,7 +417,8 @@ static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
 {
        struct taprio_sched *q = qdisc_priv(sch);
 
-       if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
+       /* sk_flags are only safe to use on full sockets. */
+       if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
                if (!is_valid_interval(skb, sch))
                        return qdisc_drop(skb, sch, to_free);
        } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
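
Request and time-wait minisocks share only struct sock_common with full sockets, so sock_flag() on them would read unrelated memory; sk_fullsock() is the standard gate. As a reusable predicate (sketch):

static bool skb_sk_uses_txtime(const struct sk_buff *skb)
{
        /* sock_flag() is only defined for full sockets */
        return skb->sk && sk_fullsock(skb->sk) &&
               sock_flag(skb->sk, SOCK_TXTIME);
}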
index b3815b568e8e5cfbf51a20d7358566462b0867bd..463c4a58d2c36d2a975c9e771ead62bd8460a376 100644 (file)
@@ -458,6 +458,10 @@ void sctp_generate_reconf_event(struct timer_list *t)
                goto out_unlock;
        }
 
+       /* This happens when the response arrives after the timer is triggered. */
+       if (!asoc->strreset_chunk)
+               goto out_unlock;
+
        error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
                           SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
                           asoc->state, asoc->ep, asoc,
index 7f342bc127358d1d6bb02a6751568afad3d7be35..52edee1322fc36b0400569f8e6de2133adf6cd74 100644 (file)
@@ -781,7 +781,7 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
                }
        }
 
-       if (security_sctp_assoc_request(new_asoc, chunk->skb)) {
+       if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
                sctp_association_free(new_asoc);
                return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
@@ -932,7 +932,7 @@ enum sctp_disposition sctp_sf_do_5_1E_ca(struct net *net,
 
        /* Set peer label for connection. */
        if (security_sctp_assoc_established((struct sctp_association *)asoc,
-                                           chunk->skb))
+                                           chunk->head_skb ?: chunk->skb))
                return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Verify that the chunk length for the COOKIE-ACK is OK.
@@ -2262,7 +2262,7 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
        }
 
        /* Update socket peer label if first association. */
-       if (security_sctp_assoc_request(new_asoc, chunk->skb)) {
+       if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
                sctp_association_free(new_asoc);
                return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
index 3e1a9600be5e1a6284ebef1df39d41fe2b3bf6b4..7b0427658056d480dcafd7f0d040a267225f51c8 100644 (file)
@@ -5636,7 +5636,7 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
         * Set the daddr and initialize id to something more random and also
         * copy over any ip options.
         */
-       sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
+       sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sock->sk);
        sp->pf->copy_ip_options(sk, sock->sk);
 
        /* Populate the fields of the newsk from the oldsk and migrate the
index f0d118e9f155178e80f78d3e16d36126873d16dd..fce16b9d6e1a4bfaa0ee4ad23ada9a3d62bcce1a 100644 (file)
@@ -121,6 +121,7 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
                                          bool *own_req)
 {
        struct smc_sock *smc;
+       struct sock *child;
 
        smc = smc_clcsock_user_data(sk);
 
@@ -134,8 +135,17 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
        }
 
        /* passthrough to original syn recv sock fct */
-       return smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash,
-                                             own_req);
+       child = smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash,
+                                              own_req);
+       /* child must not inherit smc or its ops */
+       if (child) {
+               rcu_assign_sk_user_data(child, NULL);
+
+               /* v4-mapped sockets don't inherit parent ops. Don't restore. */
+               if (inet_csk(child)->icsk_af_ops == inet_csk(sk)->icsk_af_ops)
+                       inet_csk(child)->icsk_af_ops = smc->ori_af_ops;
+       }
+       return child;
 
 drop:
        dst_release(dst);
@@ -233,11 +243,27 @@ struct proto smc_proto6 = {
 };
 EXPORT_SYMBOL_GPL(smc_proto6);
 
+static void smc_fback_restore_callbacks(struct smc_sock *smc)
+{
+       struct sock *clcsk = smc->clcsock->sk;
+
+       write_lock_bh(&clcsk->sk_callback_lock);
+       clcsk->sk_user_data = NULL;
+
+       smc_clcsock_restore_cb(&clcsk->sk_state_change, &smc->clcsk_state_change);
+       smc_clcsock_restore_cb(&clcsk->sk_data_ready, &smc->clcsk_data_ready);
+       smc_clcsock_restore_cb(&clcsk->sk_write_space, &smc->clcsk_write_space);
+       smc_clcsock_restore_cb(&clcsk->sk_error_report, &smc->clcsk_error_report);
+
+       write_unlock_bh(&clcsk->sk_callback_lock);
+}
+
 static void smc_restore_fallback_changes(struct smc_sock *smc)
 {
        if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
                smc->clcsock->file->private_data = smc->sk.sk_socket;
                smc->clcsock->file = NULL;
+               smc_fback_restore_callbacks(smc);
        }
 }
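
The smc_clcsock_restore_cb() calls above pair with replace helpers introduced elsewhere in this series; the save-once semantics keep a repeated fallback from clobbering the original TCP callbacks. A rough sketch of the replace side (treat the exact signature as an assumption of this sketch):

static inline void clcsock_replace_cb_sketch(void (**target_cb)(struct sock *),
                                             void (*new_cb)(struct sock *),
                                             void (**saved_cb)(struct sock *))
{
        if (!*saved_cb)                 /* save the original only once */
                *saved_cb = *target_cb;
        *target_cb = new_cb;
}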
 
@@ -363,6 +389,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
        sk->sk_prot->hash(sk);
        sk_refcnt_debug_inc(sk);
        mutex_init(&smc->clcsock_release_lock);
+       smc_init_saved_callbacks(smc);
 
        return sk;
 }
@@ -734,47 +761,73 @@ out:
 
 static void smc_fback_state_change(struct sock *clcsk)
 {
-       struct smc_sock *smc =
-               smc_clcsock_user_data(clcsk);
+       struct smc_sock *smc;
 
-       if (!smc)
-               return;
-       smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_state_change);
+       read_lock_bh(&clcsk->sk_callback_lock);
+       smc = smc_clcsock_user_data(clcsk);
+       if (smc)
+               smc_fback_forward_wakeup(smc, clcsk,
+                                        smc->clcsk_state_change);
+       read_unlock_bh(&clcsk->sk_callback_lock);
 }
 
 static void smc_fback_data_ready(struct sock *clcsk)
 {
-       struct smc_sock *smc =
-               smc_clcsock_user_data(clcsk);
+       struct smc_sock *smc;
 
-       if (!smc)
-               return;
-       smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_data_ready);
+       read_lock_bh(&clcsk->sk_callback_lock);
+       smc = smc_clcsock_user_data(clcsk);
+       if (smc)
+               smc_fback_forward_wakeup(smc, clcsk,
+                                        smc->clcsk_data_ready);
+       read_unlock_bh(&clcsk->sk_callback_lock);
 }
 
 static void smc_fback_write_space(struct sock *clcsk)
 {
-       struct smc_sock *smc =
-               smc_clcsock_user_data(clcsk);
+       struct smc_sock *smc;
 
-       if (!smc)
-               return;
-       smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_write_space);
+       read_lock_bh(&clcsk->sk_callback_lock);
+       smc = smc_clcsock_user_data(clcsk);
+       if (smc)
+               smc_fback_forward_wakeup(smc, clcsk,
+                                        smc->clcsk_write_space);
+       read_unlock_bh(&clcsk->sk_callback_lock);
 }
 
 static void smc_fback_error_report(struct sock *clcsk)
 {
-       struct smc_sock *smc =
-               smc_clcsock_user_data(clcsk);
+       struct smc_sock *smc;
 
-       if (!smc)
-               return;
-       smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_error_report);
+       read_lock_bh(&clcsk->sk_callback_lock);
+       smc = smc_clcsock_user_data(clcsk);
+       if (smc)
+               smc_fback_forward_wakeup(smc, clcsk,
+                                        smc->clcsk_error_report);
+       read_unlock_bh(&clcsk->sk_callback_lock);
+}
+
+static void smc_fback_replace_callbacks(struct smc_sock *smc)
+{
+       struct sock *clcsk = smc->clcsock->sk;
+
+       write_lock_bh(&clcsk->sk_callback_lock);
+       clcsk->sk_user_data = (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
+
+       smc_clcsock_replace_cb(&clcsk->sk_state_change, smc_fback_state_change,
+                              &smc->clcsk_state_change);
+       smc_clcsock_replace_cb(&clcsk->sk_data_ready, smc_fback_data_ready,
+                              &smc->clcsk_data_ready);
+       smc_clcsock_replace_cb(&clcsk->sk_write_space, smc_fback_write_space,
+                              &smc->clcsk_write_space);
+       smc_clcsock_replace_cb(&clcsk->sk_error_report, smc_fback_error_report,
+                              &smc->clcsk_error_report);
+
+       write_unlock_bh(&clcsk->sk_callback_lock);
 }
 
 static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
 {
-       struct sock *clcsk;
        int rc = 0;
 
        mutex_lock(&smc->clcsock_release_lock);
@@ -782,10 +835,7 @@ static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
                rc = -EBADF;
                goto out;
        }
-       clcsk = smc->clcsock->sk;
 
-       if (smc->use_fallback)
-               goto out;
        smc->use_fallback = true;
        smc->fallback_rsn = reason_code;
        smc_stat_fallback(smc);
@@ -800,18 +850,7 @@ static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
                 * in smc sk->sk_wq and they should be woken up
                 * as clcsock's wait queue is woken up.
                 */
-               smc->clcsk_state_change = clcsk->sk_state_change;
-               smc->clcsk_data_ready = clcsk->sk_data_ready;
-               smc->clcsk_write_space = clcsk->sk_write_space;
-               smc->clcsk_error_report = clcsk->sk_error_report;
-
-               clcsk->sk_state_change = smc_fback_state_change;
-               clcsk->sk_data_ready = smc_fback_data_ready;
-               clcsk->sk_write_space = smc_fback_write_space;
-               clcsk->sk_error_report = smc_fback_error_report;
-
-               smc->clcsock->sk->sk_user_data =
-                       (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
+               smc_fback_replace_callbacks(smc);
        }
 out:
        mutex_unlock(&smc->clcsock_release_lock);
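
Note: every smc_fback_* handler above now takes sk_callback_lock for reading before it dereferences sk_user_data, while smc_fback_replace_callbacks() publishes the owner pointer and the replacement callbacks together under the write side. A minimal pthread sketch of that discipline, with an invented struct in place of struct sock:

    #include <pthread.h>
    #include <stdio.h>

    struct sk {
            pthread_rwlock_t cb_lock;        /* role of sk_callback_lock */
            void *user_data;                 /* owner, NULL after teardown */
            void (*data_ready)(struct sk *);
    };

    static void fback_data_ready(struct sk *s)
    {
            /* reader: take the lock, then re-check the owner before use */
            pthread_rwlock_rdlock(&s->cb_lock);
            if (s->user_data)
                    printf("forwarding wakeup to %p\n", s->user_data);
            pthread_rwlock_unlock(&s->cb_lock);
    }

    static void replace_callbacks(struct sk *s, void *owner)
    {
            /* writer: owner and callback change atomically together */
            pthread_rwlock_wrlock(&s->cb_lock);
            s->user_data = owner;
            s->data_ready = fback_data_ready;
            pthread_rwlock_unlock(&s->cb_lock);
    }

    int main(void)
    {
            struct sk s = { .user_data = NULL, .data_ready = NULL };
            int owner;

            pthread_rwlock_init(&s.cb_lock, NULL);
            replace_callbacks(&s, &owner);
            s.data_ready(&s);
            pthread_rwlock_destroy(&s.cb_lock);
            return 0;
    }

Compile with -pthread; the point is only the lock ordering, not the socket machinery.
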
@@ -1465,6 +1504,8 @@ static void smc_connect_work(struct work_struct *work)
                smc->sk.sk_state = SMC_CLOSED;
                if (rc == -EPIPE || rc == -EAGAIN)
                        smc->sk.sk_err = EPIPE;
+               else if (rc == -ECONNREFUSED)
+                       smc->sk.sk_err = ECONNREFUSED;
                else if (signal_pending(current))
                        smc->sk.sk_err = -sock_intr_errno(timeo);
                sock_put(&smc->sk); /* passive closing */
@@ -1584,6 +1625,19 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
         * function; switch it back to the original sk_data_ready function
         */
        new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready;
+
+       /* if new clcsock has also inherited the fallback-specific callback
+        * functions, switch them back to the original ones.
+        */
+       if (lsmc->use_fallback) {
+               if (lsmc->clcsk_state_change)
+                       new_clcsock->sk->sk_state_change = lsmc->clcsk_state_change;
+               if (lsmc->clcsk_write_space)
+                       new_clcsock->sk->sk_write_space = lsmc->clcsk_write_space;
+               if (lsmc->clcsk_error_report)
+                       new_clcsock->sk->sk_error_report = lsmc->clcsk_error_report;
+       }
+
        (*new_smc)->clcsock = new_clcsock;
 out:
        return rc;
@@ -2343,17 +2397,20 @@ out:
 
 static void smc_clcsock_data_ready(struct sock *listen_clcsock)
 {
-       struct smc_sock *lsmc =
-               smc_clcsock_user_data(listen_clcsock);
+       struct smc_sock *lsmc;
 
+       read_lock_bh(&listen_clcsock->sk_callback_lock);
+       lsmc = smc_clcsock_user_data(listen_clcsock);
        if (!lsmc)
-               return;
+               goto out;
        lsmc->clcsk_data_ready(listen_clcsock);
        if (lsmc->sk.sk_state == SMC_LISTEN) {
                sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
                if (!queue_work(smc_tcp_ls_wq, &lsmc->tcp_listen_work))
                        sock_put(&lsmc->sk);
        }
+out:
+       read_unlock_bh(&listen_clcsock->sk_callback_lock);
 }
 
 static int smc_listen(struct socket *sock, int backlog)
@@ -2385,10 +2442,12 @@ static int smc_listen(struct socket *sock, int backlog)
        /* save original sk_data_ready function and establish
         * smc-specific sk_data_ready function
         */
-       smc->clcsk_data_ready = smc->clcsock->sk->sk_data_ready;
-       smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
+       write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
        smc->clcsock->sk->sk_user_data =
                (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
+       smc_clcsock_replace_cb(&smc->clcsock->sk->sk_data_ready,
+                              smc_clcsock_data_ready, &smc->clcsk_data_ready);
+       write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
 
        /* save original ops */
        smc->ori_af_ops = inet_csk(smc->clcsock->sk)->icsk_af_ops;
@@ -2403,7 +2462,11 @@ static int smc_listen(struct socket *sock, int backlog)
 
        rc = kernel_listen(smc->clcsock, backlog);
        if (rc) {
-               smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
+               write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
+               smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready,
+                                      &smc->clcsk_data_ready);
+               smc->clcsock->sk->sk_user_data = NULL;
+               write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
                goto out;
        }
        sk->sk_max_ack_backlog = backlog;
@@ -2664,8 +2727,10 @@ static int smc_shutdown(struct socket *sock, int how)
        if (smc->use_fallback) {
                rc = kernel_sock_shutdown(smc->clcsock, how);
                sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
-               if (sk->sk_shutdown == SHUTDOWN_MASK)
+               if (sk->sk_shutdown == SHUTDOWN_MASK) {
                        sk->sk_state = SMC_CLOSED;
+                       sock_put(sk);
+               }
                goto out;
        }
        switch (how) {
index ea0620529ebea4b5e5fccf4fdc1da60059894f9d..5ed765ea0c731a7f0095cd6a99a0e42d227eaca9 100644 (file)
@@ -288,12 +288,41 @@ static inline struct smc_sock *smc_sk(const struct sock *sk)
        return (struct smc_sock *)sk;
 }
 
+static inline void smc_init_saved_callbacks(struct smc_sock *smc)
+{
+       smc->clcsk_state_change = NULL;
+       smc->clcsk_data_ready   = NULL;
+       smc->clcsk_write_space  = NULL;
+       smc->clcsk_error_report = NULL;
+}
+
 static inline struct smc_sock *smc_clcsock_user_data(const struct sock *clcsk)
 {
        return (struct smc_sock *)
               ((uintptr_t)clcsk->sk_user_data & ~SK_USER_DATA_NOCOPY);
 }
 
+/* save target_cb in saved_cb, and replace target_cb with new_cb */
+static inline void smc_clcsock_replace_cb(void (**target_cb)(struct sock *),
+                                         void (*new_cb)(struct sock *),
+                                         void (**saved_cb)(struct sock *))
+{
+       /* only save once */
+       if (!*saved_cb)
+               *saved_cb = *target_cb;
+       *target_cb = new_cb;
+}
+
+/* restore target_cb to saved_cb, and reset saved_cb to NULL */
+static inline void smc_clcsock_restore_cb(void (**target_cb)(struct sock *),
+                                         void (**saved_cb)(struct sock *))
+{
+       if (!*saved_cb)
+               return;
+       *target_cb = *saved_cb;
+       *saved_cb = NULL;
+}
+
 extern struct workqueue_struct *smc_hs_wq;     /* wq for handshake work */
 extern struct workqueue_struct *smc_close_wq;  /* wq for close work */
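
Note on the two inline helpers above: "save once, restore once" means a second replace cannot clobber the saved original, and a second restore is a harmless no-op. The same logic as a standalone C program (signatures reduced to zero-argument callbacks for brevity):

    #include <stdio.h>

    static void replace_cb(void (**target)(void), void (*new_cb)(void),
                           void (**saved)(void))
    {
            if (!*saved)            /* never overwrite a saved original */
                    *saved = *target;
            *target = new_cb;
    }

    static void restore_cb(void (**target)(void), void (**saved)(void))
    {
            if (!*saved)
                    return;         /* double restore is harmless */
            *target = *saved;
            *saved = NULL;
    }

    static void original(void) { puts("original"); }
    static void wrapper(void)  { puts("wrapper"); }

    int main(void)
    {
            void (*cb)(void) = original;
            void (*saved)(void) = NULL;

            replace_cb(&cb, wrapper, &saved);
            replace_cb(&cb, wrapper, &saved); /* original still saved */
            cb();                             /* prints "wrapper" */
            restore_cb(&cb, &saved);
            cb();                             /* prints "original" */
            return 0;
    }
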
 
index ce27399b38b1ec2dc240a746af93fe86c18dbedf..f9f3f59c79de21e81c9d75ddc49e29af2bf3b52d 100644 (file)
@@ -191,7 +191,8 @@ static int smc_nl_ueid_dumpinfo(struct sk_buff *skb, u32 portid, u32 seq,
                          flags, SMC_NETLINK_DUMP_UEID);
        if (!hdr)
                return -ENOMEM;
-       snprintf(ueid_str, sizeof(ueid_str), "%s", ueid);
+       memcpy(ueid_str, ueid, SMC_MAX_EID_LEN);
+       ueid_str[SMC_MAX_EID_LEN] = 0;
        if (nla_put_string(skb, SMC_NLA_EID_TABLE_ENTRY, ueid_str)) {
                genlmsg_cancel(skb, hdr);
                return -EMSGSIZE;
@@ -252,7 +253,8 @@ int smc_nl_dump_seid(struct sk_buff *skb, struct netlink_callback *cb)
                goto end;
 
        smc_ism_get_system_eid(&seid);
-       snprintf(seid_str, sizeof(seid_str), "%s", seid);
+       memcpy(seid_str, seid, SMC_MAX_EID_LEN);
+       seid_str[SMC_MAX_EID_LEN] = 0;
        if (nla_put_string(skb, SMC_NLA_SEID_ENTRY, seid_str))
                goto err;
        read_lock(&smc_clc_eid_table.lock);
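
Note: the snprintf() calls replaced above were a read overflow in disguise: an EID is a fixed-width field that is not NUL-terminated, so "%s" keeps scanning past its end. Copying the fixed width and terminating explicitly is the fix. A sketch of the difference (EID_LEN stands in for SMC_MAX_EID_LEN, which this toy does not define):

    #include <stdio.h>
    #include <string.h>

    #define EID_LEN 32 /* stand-in for SMC_MAX_EID_LEN */

    int main(void)
    {
            char eid[EID_LEN];        /* fixed width, no terminator */
            char out[EID_LEN + 1];

            memset(eid, 'A', sizeof(eid));
            /* snprintf(out, sizeof(out), "%s", eid) would read past
             * eid[EID_LEN - 1] hunting for a '\0'; copy instead */
            memcpy(out, eid, EID_LEN);
            out[EID_LEN] = '\0';
            printf("%s\n", out);
            return 0;
    }
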
index 676cb2333d3c4b98f539eac4e4d69d71b4faf773..31db7438857c9f81fd336bfce74c5c92b7d275c6 100644 (file)
@@ -214,8 +214,11 @@ again:
                sk->sk_state = SMC_CLOSED;
                sk->sk_state_change(sk); /* wake up accept */
                if (smc->clcsock && smc->clcsock->sk) {
-                       smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
+                       write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
+                       smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready,
+                                              &smc->clcsk_data_ready);
                        smc->clcsock->sk->sk_user_data = NULL;
+                       write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
                        rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
                }
                smc_close_cleanup_listen(sk);
index 7984f88834720c7b26fb350579c55bc6bfc367c6..7055ed10e316271b665abddecc660d3b8ebdf8ad 100644 (file)
@@ -311,8 +311,9 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
        list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
                if (!strncmp(ibdev->ibdev->name, ib_name,
                             sizeof(ibdev->ibdev->name)) ||
-                   !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name,
-                            IB_DEVICE_NAME_MAX - 1)) {
+                   (ibdev->ibdev->dev.parent &&
+                    !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name,
+                            IB_DEVICE_NAME_MAX - 1))) {
                        goto out;
                }
        }
index 8bf2af8546d2fb886808d149d69bc90a95cd9b06..af0174d7ce5a8a4373151f62ec5ccf7d1141a89c 100644 (file)
@@ -1127,6 +1127,8 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
        struct rpc_task *task;
 
        task = rpc_new_task(task_setup_data);
+       if (IS_ERR(task))
+               return task;
 
        if (!RPC_IS_ASYNC(task))
                task->tk_flags |= RPC_TASK_CRED_NOREF;
@@ -1227,6 +1229,11 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
         * Create an rpc_task to send the data
         */
        task = rpc_new_task(&task_setup_data);
+       if (IS_ERR(task)) {
+               xprt_free_bc_request(req);
+               return task;
+       }
+
        xprt_init_bc_request(req, task);
 
        task->tk_action = call_bc_encode;
@@ -1858,6 +1865,9 @@ call_encode(struct rpc_task *task)
        xprt_request_dequeue_xprt(task);
        /* Encode here so that rpcsec_gss can use correct sequence number. */
        rpc_xdr_encode(task);
+       /* Add task to reply queue before transmission to avoid races */
+       if (task->tk_status == 0 && rpc_reply_expected(task))
+               task->tk_status = xprt_request_enqueue_receive(task);
        /* Did the encode result in an error condition? */
        if (task->tk_status != 0) {
                /* Was the error nonfatal? */
@@ -1881,9 +1891,6 @@ call_encode(struct rpc_task *task)
                return;
        }
 
-       /* Add task to reply queue before transmission to avoid races */
-       if (rpc_reply_expected(task))
-               xprt_request_enqueue_receive(task);
        xprt_request_enqueue_transmit(task);
 out:
        task->tk_action = call_transmit;
@@ -2200,6 +2207,7 @@ call_transmit_status(struct rpc_task *task)
                 * socket just returned a connection error,
                 * then hold onto the transport lock.
                 */
+       case -ENOMEM:
        case -ENOBUFS:
                rpc_delay(task, HZ>>2);
                fallthrough;
@@ -2283,6 +2291,7 @@ call_bc_transmit_status(struct rpc_task *task)
        case -ENOTCONN:
        case -EPIPE:
                break;
+       case -ENOMEM:
        case -ENOBUFS:
                rpc_delay(task, HZ>>2);
                fallthrough;
@@ -2365,6 +2374,11 @@ call_status(struct rpc_task *task)
        case -EPIPE:
        case -EAGAIN:
                break;
+       case -ENFILE:
+       case -ENOBUFS:
+       case -ENOMEM:
+               rpc_delay(task, HZ>>2);
+               break;
        case -EIO:
                /* shutdown or soft timeout */
                goto out_exit;
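
Note: the new cases teach call_status() that ENFILE, ENOBUFS and ENOMEM are transient resource exhaustion worth a quarter-second pause (rpc_delay(task, HZ>>2)) rather than a fatal RPC error. The classify-then-back-off shape, as an ordinary userspace sketch:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static bool is_transient(int err)
    {
            switch (err) {
            case ENFILE:
            case ENOBUFS:
            case ENOMEM:
                    return true;    /* retry after a short delay */
            default:
                    return false;   /* handled elsewhere or fatal */
            }
    }

    int main(void)
    {
            int err = ENOBUFS;

            if (is_transient(err)) {
                    /* roughly HZ>>2, i.e. ~250 ms */
                    struct timespec ts = { 0, 250 * 1000 * 1000 };

                    nanosleep(&ts, NULL);
                    puts("retrying after transient error");
            }
            return 0;
    }
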
index b258b87a3ec22c54bc2f815e7db27b8d7eacf5d6..7f70c1e608b7ce4f07f3651e70a764050c292dc5 100644 (file)
@@ -1128,6 +1128,11 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
 
        if (task == NULL) {
                task = rpc_alloc_task();
+               if (task == NULL) {
+                       rpc_release_calldata(setup_data->callback_ops,
+                                            setup_data->callback_data);
+                       return ERR_PTR(-ENOMEM);
+               }
                flags = RPC_TASK_DYNAMIC;
        }
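
Note: rpc_new_task() can now fail, and it reports the reason through the kernel's ERR_PTR convention so callers can distinguish "no memory" from a valid task pointer. That convention packs a small negative errno into the top of the pointer range; re-implemented in standalone C:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *alloc_task(int fail)
    {
            if (fail)
                    return ERR_PTR(-ENOMEM); /* a reason, not just NULL */
            return malloc(64);
    }

    int main(void)
    {
            void *task = alloc_task(1);

            if (IS_ERR(task)) {
                    printf("allocation failed: %ld\n", PTR_ERR(task));
                    return 1;
            }
            free(task);
            return 0;
    }
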
 
index 05b38bf68316a5aec2045fafb77b338e36f0c4ff..71ba4cf513bcef3c50220db640b4ab35e365925f 100644 (file)
@@ -221,12 +221,6 @@ static int xprt_send_kvec(struct socket *sock, struct msghdr *msg,
 static int xprt_send_pagedata(struct socket *sock, struct msghdr *msg,
                              struct xdr_buf *xdr, size_t base)
 {
-       int err;
-
-       err = xdr_alloc_bvec(xdr, rpc_task_gfp_mask());
-       if (err < 0)
-               return err;
-
        iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec, xdr_buf_pagecount(xdr),
                      xdr->page_len + xdr->page_base);
        return xprt_sendmsg(sock, msg, base + xdr->page_base);
index 297c498550383f8d67d634921595f40ab9ec78c1..5b59e2103526ebbb15eb59cd0c1b41948d27a70f 100644 (file)
@@ -1231,6 +1231,8 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
                dr->daddr = rqstp->rq_daddr;
                dr->argslen = rqstp->rq_arg.len >> 2;
                dr->xprt_hlen = rqstp->rq_xprt_hlen;
+               dr->xprt_ctxt = rqstp->rq_xprt_ctxt;
+               rqstp->rq_xprt_ctxt = NULL;
 
                /* back up head to the start of the buffer and copy */
                skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
@@ -1269,6 +1271,7 @@ static noinline int svc_deferred_recv(struct svc_rqst *rqstp)
        rqstp->rq_xprt_hlen   = dr->xprt_hlen;
        rqstp->rq_daddr       = dr->daddr;
        rqstp->rq_respages    = rqstp->rq_pages;
+       rqstp->rq_xprt_ctxt   = dr->xprt_ctxt;
        svc_xprt_received(rqstp->rq_xprt);
        return (dr->argslen<<2) - dr->xprt_hlen;
 }
index 478f857cdaed4548e7c181b3a6cc41ecf5a1425b..cc35ec4334006749f1bb09290a6528e1b8aabf0f 100644 (file)
@@ -579,15 +579,18 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
        if (svc_xprt_is_dead(xprt))
                goto out_notconn;
 
+       err = xdr_alloc_bvec(xdr, GFP_KERNEL);
+       if (err < 0)
+               goto out_unlock;
+
        err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
-       xdr_free_bvec(xdr);
        if (err == -ECONNREFUSED) {
                /* ICMP error on earlier request. */
                err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
-               xdr_free_bvec(xdr);
        }
+       xdr_free_bvec(xdr);
        trace_svcsock_udp_send(xprt, err);
-
+out_unlock:
        mutex_unlock(&xprt->xpt_mutex);
        if (err < 0)
                return err;
@@ -1096,7 +1099,9 @@ static int svc_tcp_sendmsg(struct socket *sock, struct xdr_buf *xdr,
        int ret;
 
        *sentp = 0;
-       xdr_alloc_bvec(xdr, GFP_KERNEL);
+       ret = xdr_alloc_bvec(xdr, GFP_KERNEL);
+       if (ret < 0)
+               return ret;
 
        ret = kernel_sendmsg(sock, &msg, &rm, 1, rm.iov_len);
        if (ret < 0)
index 515501f79290984bfd6ea9f659000d3264860145..86d62cffba0dd178d9c9c2529197d2f171c192de 100644 (file)
 /*
  * Local functions
  */
-static void     xprt_init(struct rpc_xprt *xprt, struct net *net);
+static void    xprt_init(struct rpc_xprt *xprt, struct net *net);
 static __be32  xprt_alloc_xid(struct rpc_xprt *xprt);
-static void     xprt_destroy(struct rpc_xprt *xprt);
-static void     xprt_request_init(struct rpc_task *task);
+static void    xprt_destroy(struct rpc_xprt *xprt);
+static void    xprt_request_init(struct rpc_task *task);
+static int     xprt_request_prepare(struct rpc_rqst *req);
 
 static DEFINE_SPINLOCK(xprt_list_lock);
 static LIST_HEAD(xprt_list);
@@ -929,12 +930,7 @@ void xprt_connect(struct rpc_task *task)
        if (!xprt_lock_write(xprt, task))
                return;
 
-       if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
-               trace_xprt_disconnect_cleanup(xprt);
-               xprt->ops->close(xprt);
-       }
-
-       if (!xprt_connected(xprt)) {
+       if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
                task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
                rpc_sleep_on_timeout(&xprt->pending, task, NULL,
                                xprt_request_timeout(task->tk_rqstp));
@@ -1143,16 +1139,19 @@ xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
  * @task: RPC task
  *
  */
-void
+int
 xprt_request_enqueue_receive(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
+       int ret;
 
        if (!xprt_request_need_enqueue_receive(task, req))
-               return;
+               return 0;
 
-       xprt_request_prepare(task->tk_rqstp);
+       ret = xprt_request_prepare(task->tk_rqstp);
+       if (ret)
+               return ret;
        spin_lock(&xprt->queue_lock);
 
        /* Update the softirq receive buffer */
@@ -1166,6 +1165,7 @@ xprt_request_enqueue_receive(struct rpc_task *task)
 
        /* Turn off autodisconnect */
        del_singleshot_timer_sync(&xprt->timer);
+       return 0;
 }
 
 /**
@@ -1452,14 +1452,16 @@ xprt_request_dequeue_xprt(struct rpc_task *task)
  *
  * Calls into the transport layer to do whatever is needed to prepare
  * the request for transmission or receive.
+ * Returns error, or zero.
  */
-void
+static int
 xprt_request_prepare(struct rpc_rqst *req)
 {
        struct rpc_xprt *xprt = req->rq_xprt;
 
        if (xprt->ops->prepare_request)
-               xprt->ops->prepare_request(req);
+               return xprt->ops->prepare_request(req);
+       return 0;
 }
 
 /**
index cf76a6ad127b26603d0c07ea1a8845dd80527577..864131a9fc6e38e99758e84c579f4d0e87bf7b74 100644 (file)
@@ -831,7 +831,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
                goto out_err;
        if (ret == 0)
                goto out_drop;
-       rqstp->rq_xprt_hlen = ret;
+       rqstp->rq_xprt_hlen = 0;
 
        if (svc_rdma_is_reverse_direction_reply(xprt, ctxt))
                goto out_backchannel;
index 78af7518f263ddcd9c6726698d802367aafa34d6..8ab64ea46870a7fa6463d77fc259c4ba6126f329 100644 (file)
@@ -822,12 +822,17 @@ static int xs_stream_nospace(struct rpc_rqst *req, bool vm_wait)
        return ret;
 }
 
-static void
+static int
 xs_stream_prepare_request(struct rpc_rqst *req)
 {
+       gfp_t gfp = rpc_task_gfp_mask();
+       int ret;
+
+       ret = xdr_alloc_bvec(&req->rq_snd_buf, gfp);
+       if (ret < 0)
+               return ret;
        xdr_free_bvec(&req->rq_rcv_buf);
-       req->rq_task->tk_status = xdr_alloc_bvec(
-               &req->rq_rcv_buf, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+       return xdr_alloc_bvec(&req->rq_rcv_buf, gfp);
 }
 
 /*
@@ -879,7 +884,7 @@ static int xs_local_send_request(struct rpc_rqst *req)
 
        /* Close the stream if the previous transmission was incomplete */
        if (xs_send_request_was_aborted(transport, req)) {
-               xs_close(xprt);
+               xprt_force_disconnect(xprt);
                return -ENOTCONN;
        }
 
@@ -915,7 +920,7 @@ static int xs_local_send_request(struct rpc_rqst *req)
                        -status);
                fallthrough;
        case -EPIPE:
-               xs_close(xprt);
+               xprt_force_disconnect(xprt);
                status = -ENOTCONN;
        }
 
@@ -956,6 +961,9 @@ static int xs_udp_send_request(struct rpc_rqst *req)
        if (!xprt_request_get_cong(xprt, req))
                return -EBADSLT;
 
+       status = xdr_alloc_bvec(xdr, rpc_task_gfp_mask());
+       if (status < 0)
+               return status;
        req->rq_xtime = ktime_get();
        status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, 0, &sent);
 
@@ -1185,6 +1193,16 @@ static void xs_reset_transport(struct sock_xprt *transport)
 
        if (sk == NULL)
                return;
+       /*
+        * Make sure we're calling this in a context from which it is safe
+        * to call __fput_sync(). In practice that means rpciod and the
+        * system workqueue.
+        */
+       if (!(current->flags & PF_WQ_WORKER)) {
+               WARN_ON_ONCE(1);
+               set_bit(XPRT_CLOSE_WAIT, &xprt->state);
+               return;
+       }
 
        if (atomic_read(&transport->xprt.swapper))
                sk_clear_memalloc(sk);
@@ -1208,7 +1226,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
        mutex_unlock(&transport->recv_mutex);
 
        trace_rpc_socket_close(xprt, sock);
-       fput(filp);
+       __fput_sync(filp);
 
        xprt_disconnect_done(xprt);
 }
@@ -2544,6 +2562,9 @@ static int bc_sendto(struct rpc_rqst *req)
        int err;
 
        req->rq_xtime = ktime_get();
+       err = xdr_alloc_bvec(xdr, rpc_task_gfp_mask());
+       if (err < 0)
+               return err;
        err = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, marker, &sent);
        xdr_free_bvec(xdr);
        if (err < 0 || sent != (xdr->len + sizeof(marker)))
index ee1c2b6b69711b97206a34aae86cb47acea93e54..21e808fcb676c29e2952c3e177a265671e22c4fa 100644 (file)
@@ -528,7 +528,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
                                   .len = IEEE80211_MAX_MESH_ID_LEN },
        [NL80211_ATTR_MPATH_NEXT_HOP] = NLA_POLICY_ETH_ADDR_COMPAT,
 
-       [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
+       /* allow 3 for NUL-termination, we used to declare this NLA_STRING */
+       [NL80211_ATTR_REG_ALPHA2] = NLA_POLICY_RANGE(NLA_BINARY, 2, 3),
        [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED },
 
        [NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 },
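
Note: the policy change above widens NL80211_ATTR_REG_ALPHA2 from a strict 2-byte NLA_STRING to a 2-3 byte NLA_BINARY, so userspace may send a country code like "DE" with or without a trailing NUL. The policy itself only bounds the length; what a consumer would then check looks roughly like the helper below, which is illustrative, not nl80211 code:

    #include <stdbool.h>
    #include <stdio.h>

    static bool alpha2_ok(const char *buf, int len)
    {
            if (len < 2 || len > 3)
                    return false;   /* outside the policy range */
            if (len == 3 && buf[2] != '\0')
                    return false;   /* third byte must be the NUL */
            return true;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   alpha2_ok("DE", 2),   /* bare two bytes: 1 */
                   alpha2_ok("DE", 3),   /* "DE\0": 1 */
                   alpha2_ok("DEU", 3)); /* three letters: 0 */
            return 0;
    }
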
index b2fdac96bab07fd60a6aeacc9574b34ab582fc71..4a6d8643291064d4dd41c6e886477821b18e8982 100644 (file)
@@ -2018,11 +2018,13 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
                /* this is a nontransmitting bss, we need to add it to
                 * transmitting bss' list if it is not there
                 */
+               spin_lock_bh(&rdev->bss_lock);
                if (cfg80211_add_nontrans_list(non_tx_data->tx_bss,
                                               &res->pub)) {
                        if (__cfg80211_unlink_bss(rdev, res))
                                rdev->bss_generation++;
                }
+               spin_unlock_bh(&rdev->bss_lock);
        }
 
        trace_cfg80211_return_bss(&res->pub);
index 19aa994f5d2c2bffed1d306b2c395b45c7df25a3..00bd0ecff5a1bbc11378c9c80218a21359e08f50 100644 (file)
@@ -2593,12 +2593,14 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
 
                if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
                        __u32 mark = 0;
+                       int oif;
 
                        if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
                                mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
 
                        family = xfrm[i]->props.family;
-                       dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
+                       oif = fl->flowi_oif ? : fl->flowi_l3mdev;
+                       dst = xfrm_dst_lookup(xfrm[i], tos, oif,
                                              &saddr, &daddr, family, mark);
                        err = PTR_ERR(dst);
                        if (IS_ERR(dst))
index 2173a6729f30ed2a6410369cfd9ae6bf329500b0..9717e6f6fb31498aec1075220e346c769648decf 100644 (file)
@@ -40,8 +40,7 @@ include $(srctree)/scripts/Makefile.compiler
 
 # The filename Kbuild has precedence over Makefile
 kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
-kbuild-file := $(if $(wildcard $(kbuild-dir)/Kbuild),$(kbuild-dir)/Kbuild,$(kbuild-dir)/Makefile)
-include $(kbuild-file)
+include $(or $(wildcard $(kbuild-dir)/Kbuild),$(kbuild-dir)/Makefile)
 
 include $(srctree)/scripts/Makefile.lib
 
index fd6175322470a707c372b9d6ffdabc1b12221057..74cb1c5c36581d25111e8de47a6344b0f08570a6 100644 (file)
@@ -12,7 +12,7 @@ include $(srctree)/scripts/Kbuild.include
 
 # The filename Kbuild has precedence over Makefile
 kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
-include $(if $(wildcard $(kbuild-dir)/Kbuild), $(kbuild-dir)/Kbuild, $(kbuild-dir)/Makefile)
+include $(or $(wildcard $(kbuild-dir)/Kbuild),$(kbuild-dir)/Makefile)
 
 # Figure out what we need to build from the various variables
 # ==========================================================================
index c593475d4a931aa4cfef5b8152c52934acf11f67..9f69ecdd7977a3ae1f8649758b74ad3559694e78 100644 (file)
@@ -106,7 +106,7 @@ subdir-ym   := $(addprefix $(obj)/,$(subdir-ym))
 modname-multi = $(sort $(foreach m,$(multi-obj-ym),\
                $(if $(filter $*.o, $(call suffix-search, $m, .o, -objs -y -m)),$(m:.o=))))
 
-__modname = $(if $(modname-multi),$(modname-multi),$(basetarget))
+__modname = $(or $(modname-multi),$(basetarget))
 
 modname = $(subst $(space),:,$(__modname))
 modfile = $(addprefix $(obj)/,$(__modname))
@@ -241,20 +241,16 @@ $(foreach m, $(notdir $1), \
        $(addprefix $(obj)/, $(foreach s, $3, $($(m:%$(strip $2)=%$(s)))))))
 endef
 
-quiet_cmd_copy = COPY    $@
-      cmd_copy = cp $< $@
-
-# Shipped files
+# Copy a file
 # ===========================================================================
 # 'cp' preserves permissions. If you use it to copy a file in read-only srctree,
 # the copy would be read-only as well, leading to an error when executing the
 # rule next time. Use 'cat' instead in order to generate a writable file.
-
-quiet_cmd_shipped = SHIPPED $@
-cmd_shipped = cat $< > $@
+quiet_cmd_copy = COPY    $@
+      cmd_copy = cat $< > $@
 
 $(obj)/%: $(src)/%_shipped
-       $(call cmd,shipped)
+       $(call cmd,copy)
 
 # Commands useful for building a boot image
 # ===========================================================================
@@ -431,7 +427,7 @@ MKIMAGE := $(srctree)/scripts/mkuboot.sh
 # SRCARCH just happens to match slightly more than ARCH (on sparc), so reduces
 # the number of overrides in arch makefiles
 UIMAGE_ARCH ?= $(SRCARCH)
-UIMAGE_COMPRESSION ?= $(if $(2),$(2),none)
+UIMAGE_COMPRESSION ?= $(or $(2),none)
 UIMAGE_OPTS-y ?=
 UIMAGE_TYPE ?= kernel
 UIMAGE_LOADADDR ?= arch_must_set_this
index 44e887cff49b67f0facbd7148fd8b8932cc322eb..2328f9a641dadf53490cccfe26830517e3426bf0 100644 (file)
@@ -105,25 +105,6 @@ static void usage(void)
        exit(1);
 }
 
-/*
- * In the intended usage of this program, the stdout is redirected to .*.cmd
- * files. The return value of printf() must be checked to catch any error,
- * e.g. "No space left on device".
- */
-static void xprintf(const char *format, ...)
-{
-       va_list ap;
-       int ret;
-
-       va_start(ap, format);
-       ret = vprintf(format, ap);
-       if (ret < 0) {
-               perror("fixdep");
-               exit(1);
-       }
-       va_end(ap);
-}
-
 struct item {
        struct item     *next;
        unsigned int    len;
@@ -189,7 +170,7 @@ static void use_config(const char *m, int slen)
 
        define_config(m, slen, hash);
        /* Print out a dependency path from a symbol name. */
-       xprintf("    $(wildcard include/config/%.*s) \\\n", slen, m);
+       printf("    $(wildcard include/config/%.*s) \\\n", slen, m);
 }
 
 /* test if s ends in sub */
@@ -318,13 +299,13 @@ static void parse_dep_file(char *m, const char *target)
                                 */
                                if (!saw_any_target) {
                                        saw_any_target = 1;
-                                       xprintf("source_%s := %s\n\n",
-                                               target, m);
-                                       xprintf("deps_%s := \\\n", target);
+                                       printf("source_%s := %s\n\n",
+                                              target, m);
+                                       printf("deps_%s := \\\n", target);
                                }
                                is_first_dep = 0;
                        } else {
-                               xprintf("  %s \\\n", m);
+                               printf("  %s \\\n", m);
                        }
 
                        buf = read_file(m);
@@ -347,8 +328,8 @@ static void parse_dep_file(char *m, const char *target)
                exit(1);
        }
 
-       xprintf("\n%s: $(deps_%s)\n\n", target, target);
-       xprintf("$(deps_%s):\n", target);
+       printf("\n%s: $(deps_%s)\n\n", target, target);
+       printf("$(deps_%s):\n", target);
 }
 
 int main(int argc, char *argv[])
@@ -363,11 +344,22 @@ int main(int argc, char *argv[])
        target = argv[2];
        cmdline = argv[3];
 
-       xprintf("cmd_%s := %s\n\n", target, cmdline);
+       printf("cmd_%s := %s\n\n", target, cmdline);
 
        buf = read_file(depfile);
        parse_dep_file(buf, target);
        free(buf);
 
+       fflush(stdout);
+
+       /*
+        * In the intended usage, the stdout is redirected to .*.cmd files.
+        * Call ferror() to catch errors such as "No space left on device".
+        */
+       if (ferror(stdout)) {
+               fprintf(stderr, "fixdep: not all data was written to the output\n");
+               exit(1);
+       }
+
        return 0;
 }
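
Note: dropping xprintf() works because stdio keeps a sticky per-stream error flag; one ferror() check after a final fflush() observes any earlier failed write (say, "No space left on device") without checking every printf(). Demonstrated standalone:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            for (int i = 0; i < 1000; i++)
                    printf("deps_%d := \\\n", i);

            /* flush once, then consult the sticky error flag */
            fflush(stdout);
            if (ferror(stdout)) {
                    fprintf(stderr, "not all data was written\n");
                    exit(1);
            }
            return 0;
    }

Run it with stdout redirected to a nearly full filesystem to watch the error path fire exactly once, at the end.
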
index 589454bce93013e5fb0802873f0589b00721b20b..8425da41de0dab49c33c2f93e4eb7f1ccf095d79 100644 (file)
@@ -86,25 +86,31 @@ static struct plugin_info latent_entropy_plugin_info = {
        .help           = "disable\tturn off latent entropy instrumentation\n",
 };
 
-static unsigned HOST_WIDE_INT seed;
-/*
- * get_random_seed() (this is a GCC function) generates the seed.
- * This is a simple random generator without any cryptographic security because
- * the entropy doesn't come from here.
- */
+static unsigned HOST_WIDE_INT deterministic_seed;
+static unsigned HOST_WIDE_INT rnd_buf[32];
+static size_t rnd_idx = ARRAY_SIZE(rnd_buf);
+static int urandom_fd = -1;
+
 static unsigned HOST_WIDE_INT get_random_const(void)
 {
-       unsigned int i;
-       unsigned HOST_WIDE_INT ret = 0;
-
-       for (i = 0; i < 8 * sizeof(ret); i++) {
-               ret = (ret << 1) | (seed & 1);
-               seed >>= 1;
-               if (ret & 1)
-                       seed ^= 0xD800000000000000ULL;
+       if (deterministic_seed) {
+               unsigned HOST_WIDE_INT w = deterministic_seed;
+               w ^= w << 13;
+               w ^= w >> 7;
+               w ^= w << 17;
+               deterministic_seed = w;
+               return deterministic_seed;
        }
 
-       return ret;
+       if (urandom_fd < 0) {
+               urandom_fd = open("/dev/urandom", O_RDONLY);
+               gcc_assert(urandom_fd >= 0);
+       }
+       if (rnd_idx >= ARRAY_SIZE(rnd_buf)) {
+               gcc_assert(read(urandom_fd, rnd_buf, sizeof(rnd_buf)) == sizeof(rnd_buf));
+               rnd_idx = 0;
+       }
+       return rnd_buf[rnd_idx++];
 }
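
Note: with -frandom-seed the plugin now walks the classic xorshift64 sequence (shift constants 13, 7, 17) so identical seeds yield identical "random" constants across rebuilds; without a seed it reads /dev/urandom in 32-word batches instead. The deterministic path as a standalone program:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t xorshift64(uint64_t *state)
    {
            uint64_t w = *state;

            w ^= w << 13;
            w ^= w >> 7;
            w ^= w << 17;
            *state = w;
            return w;
    }

    int main(void)
    {
            uint64_t seed = 0x123456789abcdef0ULL; /* must be nonzero */

            for (int i = 0; i < 4; i++)
                    printf("%016llx\n",
                           (unsigned long long)xorshift64(&seed));
            return 0;
    }
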
 
 static tree tree_get_random_const(tree type)
@@ -537,8 +543,6 @@ static void latent_entropy_start_unit(void *gcc_data __unused,
        tree type, id;
        int quals;
 
-       seed = get_random_seed(false);
-
        if (in_lto_p)
                return;
 
@@ -573,6 +577,12 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
        const struct plugin_argument * const argv = plugin_info->argv;
        int i;
 
+       /*
+        * Call get_random_seed() with noinit=true, so that this returns
+        * 0 in the case where no seed has been passed via -frandom-seed.
+        */
+       deterministic_seed = get_random_seed(true);
+
        static const struct ggc_root_tab gt_ggc_r_gt_latent_entropy[] = {
                {
                        .base = &latent_entropy_decl,
index 7437e19ba3ac148a76b605b9bd85b382193c10ba..1389db76cff31d1fef8f5a075bfac8d5040b7df2 100755 (executable)
@@ -327,7 +327,7 @@ sub output_rest {
                my @filepath = split / /, $data{$what}->{filepath};
 
                if ($enable_lineno) {
-                       printf "#define LINENO %s%s#%s\n\n",
+                       printf ".. LINENO %s%s#%s\n\n",
                               $prefix, $file[0],
                               $data{$what}->{line_no};
                }
@@ -1023,7 +1023,7 @@ logic (B<--no-rst-source>).
 
 =item B<--enable-lineno>
 
-Enable output of #define LINENO lines.
+Enable output of .. LINENO lines.
 
 =item B<--debug> I<debug level>
 
index 457712355676a47cd8374c5b19c793509b6b6491..76cfb96b59b6dae92c65af3655a2e1e7107f31b8 100755 (executable)
@@ -13,6 +13,7 @@ my $man;
 my $debug;
 my $arch;
 my $feat;
+my $enable_fname;
 
 my $basename = abs_path($0);
 $basename =~ s,/[^/]+$,/,;
@@ -31,6 +32,7 @@ GetOptions(
        'arch=s' => \$arch,
        'feat=s' => \$feat,
        'feature=s' => \$feat,
+       "enable-fname" => \$enable_fname,
        man => \$man
 ) or pod2usage(2);
 
@@ -95,6 +97,10 @@ sub parse_feat {
        return if ($file =~ m,($prefix)/arch-support.txt,);
        return if (!($file =~ m,arch-support.txt$,));
 
+       if ($enable_fname) {
+               printf ".. FILE %s\n", abs_path($file);
+       }
+
        my $subsys = "";
        $subsys = $2 if ( m,.*($prefix)/([^/]+).*,);
 
@@ -580,6 +586,11 @@ Output features for a single specific feature.
 Changes the location of the Feature files. By default, it uses
 the Documentation/features directory.
 
+=item B<--enable-fname>
+
+Prints the file name of the feature files. This can be used in order to
+track dependencies during documentation build.
+
 =item B<--debug>
 
 Put the script in verbose mode, useful for debugging. Can be called multiple
index 54ad86d13784995cfb8eb57bd0827204c6c36886..8caabddf817ca0d69793935d20e9dcf75f1a4652 100644 (file)
@@ -108,7 +108,7 @@ static bool is_ignored_symbol(const char *name, char type)
        /* Symbol names that begin with the following are ignored.*/
        static const char * const ignored_prefixes[] = {
                "$",                    /* local symbols for ARM, MIPS, etc. */
-               ".LASANPC",             /* s390 kasan local symbols */
+               ".L",                   /* local labels, .LBB,.Ltmpxxx,.L__unnamed_xx,.LASANPC, etc. */
                "__crc_",               /* modversions */
                "__efistub_",           /* arm64 EFI stub namespace */
                "__kvm_nvhe_",          /* arm64 non-VHE KVM namespace */
index d3c3a61308adaecbfa77e8b79d71878adefd6fa2..c4340c90e172f875739dd19a8939d812cf66c978 100644 (file)
@@ -658,13 +658,6 @@ static char *escape_string_value(const char *in)
        return out;
 }
 
-/*
- * Kconfig configuration printer
- *
- * This printer is used when generating the resulting configuration after
- * kconfig invocation and `defconfig' files. Unset symbol might be omitted by
- * passing a non-NULL argument to the printer.
- */
 enum output_n { OUTPUT_N, OUTPUT_N_AS_UNSET, OUTPUT_N_NONE };
 
 static void __print_symbol(FILE *fp, struct symbol *sym, enum output_n output_n,
@@ -903,19 +896,20 @@ next:
                        menu = menu->list;
                        continue;
                }
-               if (menu->next)
+
+end_check:
+               if (!menu->sym && menu_is_visible(menu) && menu != &rootmenu &&
+                   menu->prompt->type == P_MENU) {
+                       fprintf(out, "# end of %s\n", menu_get_prompt(menu));
+                       need_newline = true;
+               }
+
+               if (menu->next) {
                        menu = menu->next;
-               else while ((menu = menu->parent)) {
-                       if (!menu->sym && menu_is_visible(menu) &&
-                           menu != &rootmenu) {
-                               str = menu_get_prompt(menu);
-                               fprintf(out, "# end of %s\n", str);
-                               need_newline = true;
-                       }
-                       if (menu->next) {
-                               menu = menu->next;
-                               break;
-                       }
+               } else {
+                       menu = menu->parent;
+                       if (menu)
+                               goto end_check;
                }
        }
        fclose(out);
@@ -979,6 +973,7 @@ static int conf_write_autoconf_cmd(const char *autoconf_name)
 
        fprintf(out, "\n$(deps_config): ;\n");
 
+       fflush(out);
        ret = ferror(out); /* error check for all fprintf() calls */
        fclose(out);
        if (ret)
@@ -1097,6 +1092,7 @@ static int __conf_write_autoconf(const char *filename,
                if ((sym->flags & SYMBOL_WRITE) && sym->name)
                        print_symbol(file, sym);
 
+       fflush(file);
        /* check possible errors in conf_write_heading() and print_symbol() */
        ret = ferror(file);
        fclose(file);
index 9c084a2ba3b0519921aceec34ceb7dae4f6e9e67..7516949bb049e39fc76414317b229dd10d070241 100755 (executable)
@@ -424,7 +424,7 @@ sub get_kernel_version() {
 sub print_lineno {
     my $lineno = shift;
     if ($enable_lineno && defined($lineno)) {
-        print "#define LINENO " . $lineno . "\n";
+        print ".. LINENO " . $lineno . "\n";
     }
 }
 ##
@@ -2478,7 +2478,7 @@ May be specified multiple times.
 
 =item -enable-lineno
 
-Enable output of #define LINENO lines.
+Enable output of .. LINENO lines.
 
 =back
 
index f704034ebbe678679d959e9f5955e4b06c19a694..20f44504a644b900eafebe483c2d2a6f14dbe46f 100755 (executable)
@@ -50,7 +50,7 @@ gen_initcalls()
 {
        info GEN .tmp_initcalls.lds
 
-       ${PYTHON} ${srctree}/scripts/jobserver-exec             \
+       ${PYTHON3} ${srctree}/scripts/jobserver-exec            \
        ${PERL} ${srctree}/scripts/generate_initcall_order.pl   \
                ${KBUILD_VMLINUX_OBJS} ${KBUILD_VMLINUX_LIBS}   \
                > .tmp_initcalls.lds
index d10f93aac1c8215169c3c3c86aef6f1039b52a28..ed9d056d2108a2c4123f63f812fff1c0686392ba 100644 (file)
@@ -674,7 +674,7 @@ static void handle_modversion(const struct module *mod,
        unsigned int crc;
 
        if (sym->st_shndx == SHN_UNDEF) {
-               warn("EXPORT symbol \"%s\" [%s%s] version ...\n"
+               warn("EXPORT symbol \"%s\" [%s%s] version generation failed, symbol will not be versioned.\n"
                     "Is \"%s\" prototyped in <asm/asm-prototypes.h>?\n",
                     symname, mod->name, mod->is_vmlinux ? "" : ".ko",
                     symname);
index 1d2d71cc1f36ce4af728f7bbefc3deb9cb63399a..9b2c4925585a3a761019cd8b5358ecf09279a29c 100644 (file)
@@ -166,7 +166,7 @@ config HARDENED_USERCOPY
 config HARDENED_USERCOPY_PAGESPAN
        bool "Refuse to copy allocations that span multiple pages"
        depends on HARDENED_USERCOPY
-       depends on EXPERT
+       depends on BROKEN
        help
          When a multi-page allocation is done without __GFP_COMP,
          hardened usercopy will reject attempts to copy it. There are,
index 31ba7024e3addfd5e7ffdac9f792a2bab27e1828..726a8353201f834048f5a847c03fe95806441459 100644 (file)
@@ -209,6 +209,12 @@ static void __snd_card_release(struct device *dev, void *data)
  * snd_card_register(), the very first devres action to call snd_card_free()
  * is added automatically.  In that way, the resource disconnection is assured
  * at first, then released in the expected order.
+ *
+ * If an error happens at the probe before snd_card_register() is called and
+ * there have been other devres resources, you'd need to free the card manually
+ * via snd_card_free() call in the error; otherwise it may lead to UAF due to
+ * devres call orders.  You can use snd_card_free_on_error() helper for
+ * handling it more easily.
  */
 int snd_devm_card_new(struct device *parent, int idx, const char *xid,
                      struct module *module, size_t extra_size,
@@ -235,6 +241,28 @@ int snd_devm_card_new(struct device *parent, int idx, const char *xid,
 }
 EXPORT_SYMBOL_GPL(snd_devm_card_new);
 
+/**
+ * snd_card_free_on_error - a small helper for handling devm probe errors
+ * @dev: the managed device object
+ * @ret: the return code from the probe callback
+ *
+ * This function handles the explicit snd_card_free() call at the error from
+ * the probe callback.  It's just a small helper for simplifying the error
+ * handling with the managed devices.
+ */
+int snd_card_free_on_error(struct device *dev, int ret)
+{
+       struct snd_card *card;
+
+       if (!ret)
+               return 0;
+       card = devres_find(dev, __snd_card_release, NULL, NULL);
+       if (card)
+               snd_card_free(card);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(snd_card_free_on_error);
+
 static int snd_card_init(struct snd_card *card, struct device *parent,
                         int idx, const char *xid, struct module *module,
                         size_t extra_size)
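
Note: the kdoc added above spells out when a driver still has to free the card by hand, and snd_card_free_on_error() wraps exactly that. A hedged sketch of the intended call pattern in a devres-managed probe; my_probe() and my_setup_hardware() are hypothetical, only the snd_* calls come from this file:

    /* hypothetical PCI probe, sketching the helper's use */
    static int my_probe(struct pci_dev *pci, const struct pci_device_id *id)
    {
            struct snd_card *card;
            int err;

            err = snd_devm_card_new(&pci->dev, -1, NULL, THIS_MODULE,
                                    0, &card);
            if (err < 0)
                    return err;

            err = my_setup_hardware(card); /* may fail pre-registration */
            if (!err)
                    err = snd_card_register(card);

            /* no-op on success; on error frees the card now, before
             * devres teardown order could use-after-free it */
            return snd_card_free_on_error(&pci->dev, err);
    }
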
index 6fd763d4d15b19457707d7b12b45579e87ac77fe..15dc7160ba34e8433b3de4ef5467555e64be4516 100644 (file)
@@ -499,6 +499,10 @@ static const struct snd_malloc_ops snd_dma_wc_ops = {
 };
 #endif /* CONFIG_X86 */
 
+#ifdef CONFIG_SND_DMA_SGBUF
+static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
+#endif
+
 /*
  * Non-contiguous pages allocator
  */
@@ -509,8 +513,18 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
 
        sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
                                      DEFAULT_GFP, 0);
-       if (!sgt)
+       if (!sgt) {
+#ifdef CONFIG_SND_DMA_SGBUF
+               if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
+                       dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
+               else
+                       dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
+               return snd_dma_sg_fallback_alloc(dmab, size);
+#else
                return NULL;
+#endif
+       }
+
        dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
                                            sg_dma_address(sgt->sgl));
        p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
@@ -633,6 +647,8 @@ static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
 
        if (!p)
                return NULL;
+       if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
+               return p;
        for_each_sgtable_page(sgt, &iter, 0)
                set_memory_wc(sg_wc_address(&iter), 1);
        return p;
@@ -665,6 +681,95 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
        .get_page = snd_dma_noncontig_get_page,
        .get_chunk_size = snd_dma_noncontig_get_chunk_size,
 };
+
+/* Fallback SG-buffer allocations for x86 */
+struct snd_dma_sg_fallback {
+       size_t count;
+       struct page **pages;
+       dma_addr_t *addrs;
+};
+
+static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
+                                      struct snd_dma_sg_fallback *sgbuf)
+{
+       size_t i;
+
+       if (sgbuf->count && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+               set_pages_array_wb(sgbuf->pages, sgbuf->count);
+       for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
+               dma_free_coherent(dmab->dev.dev, PAGE_SIZE,
+                                 page_address(sgbuf->pages[i]),
+                                 sgbuf->addrs[i]);
+       kvfree(sgbuf->pages);
+       kvfree(sgbuf->addrs);
+       kfree(sgbuf);
+}
+
+static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+       struct snd_dma_sg_fallback *sgbuf;
+       struct page **pages;
+       size_t i, count;
+       void *p;
+
+       sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
+       if (!sgbuf)
+               return NULL;
+       count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
+       if (!pages)
+               goto error;
+       sgbuf->pages = pages;
+       sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL);
+       if (!sgbuf->addrs)
+               goto error;
+
+       for (i = 0; i < count; sgbuf->count++, i++) {
+               p = dma_alloc_coherent(dmab->dev.dev, PAGE_SIZE,
+                                      &sgbuf->addrs[i], DEFAULT_GFP);
+               if (!p)
+                       goto error;
+               sgbuf->pages[i] = virt_to_page(p);
+       }
+
+       if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+               set_pages_array_wc(pages, count);
+       p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
+       if (!p)
+               goto error;
+       dmab->private_data = sgbuf;
+       return p;
+
+ error:
+       __snd_dma_sg_fallback_free(dmab, sgbuf);
+       return NULL;
+}
+
+static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
+{
+       vunmap(dmab->area);
+       __snd_dma_sg_fallback_free(dmab, dmab->private_data);
+}
+
+static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
+                                   struct vm_area_struct *area)
+{
+       struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
+
+       if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+               area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+       return vm_map_pages(area, sgbuf->pages, sgbuf->count);
+}
+
+static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
+       .alloc = snd_dma_sg_fallback_alloc,
+       .free = snd_dma_sg_fallback_free,
+       .mmap = snd_dma_sg_fallback_mmap,
+       /* reuse vmalloc helpers */
+       .get_addr = snd_dma_vmalloc_get_addr,
+       .get_page = snd_dma_vmalloc_get_page,
+       .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+};
 #endif /* CONFIG_SND_DMA_SGBUF */
 
 /*
@@ -736,6 +841,10 @@ static const struct snd_malloc_ops *dma_ops[] = {
 #ifdef CONFIG_GENERIC_ALLOCATOR
        [SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
 #endif /* CONFIG_GENERIC_ALLOCATOR */
+#ifdef CONFIG_SND_DMA_SGBUF
+       [SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
+       [SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
+#endif
 #endif /* CONFIG_HAS_DMA */
 };
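
Note: the fallback allocator added above degrades gracefully: when one big dma_alloc_noncontiguous() fails, it collects the buffer one page at a time and stitches the pages together with vmap(), rolling everything back if any single page fails. The shape of that strategy in plain userspace C, with malloc standing in for the DMA allocator:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE 4096

    static void **alloc_page_array(size_t size, size_t *count)
    {
            void **pages;
            size_t i;

            *count = (size + PAGE - 1) / PAGE;
            pages = calloc(*count, sizeof(*pages));
            if (!pages)
                    return NULL;
            for (i = 0; i < *count; i++) {
                    pages[i] = malloc(PAGE); /* per-page fallback alloc */
                    if (!pages[i]) {
                            while (i--)      /* roll back on failure */
                                    free(pages[i]);
                            free(pages);
                            return NULL;
                    }
            }
            return pages;
    }

    int main(void)
    {
            size_t count, i;
            void **pages = alloc_page_array(3 * PAGE + 100, &count);

            if (!pages)
                    return 1;
            printf("allocated %zu pages\n", count);
            for (i = 0; i < count; i++)
                    free(pages[i]);
            free(pages);
            return 0;
    }
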
 
index edd9849210f2d8b4a863f7dce327b3728d5e0a81..977d54320a5cabd858f6fce624de9c4f9fd430d0 100644 (file)
@@ -970,6 +970,7 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
 
        runtime->status->state = SNDRV_PCM_STATE_OPEN;
        mutex_init(&runtime->buffer_mutex);
+       atomic_set(&runtime->buffer_accessing, 0);
 
        substream->runtime = runtime;
        substream->private_data = pcm->private_data;
index a40a35e51fad7c842af674923e52aad9ce127f70..1fc7c50ffa625bc3ec38486b1cf251c96acb39b4 100644 (file)
@@ -1906,11 +1906,9 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
                if (avail >= runtime->twake)
                        break;
                snd_pcm_stream_unlock_irq(substream);
-               mutex_unlock(&runtime->buffer_mutex);
 
                tout = schedule_timeout(wait_time);
 
-               mutex_lock(&runtime->buffer_mutex);
                snd_pcm_stream_lock_irq(substream);
                set_current_state(TASK_INTERRUPTIBLE);
                switch (runtime->status->state) {
@@ -2221,7 +2219,6 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
 
        nonblock = !!(substream->f_flags & O_NONBLOCK);
 
-       mutex_lock(&runtime->buffer_mutex);
        snd_pcm_stream_lock_irq(substream);
        err = pcm_accessible_state(runtime);
        if (err < 0)
@@ -2276,6 +2273,10 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
                        err = -EINVAL;
                        goto _end_unlock;
                }
+               if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
+                       err = -EBUSY;
+                       goto _end_unlock;
+               }
                snd_pcm_stream_unlock_irq(substream);
                if (!is_playback)
                        snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
@@ -2284,6 +2285,7 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
                if (is_playback)
                        snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
                snd_pcm_stream_lock_irq(substream);
+               atomic_dec(&runtime->buffer_accessing);
                if (err < 0)
                        goto _end_unlock;
                err = pcm_accessible_state(runtime);
@@ -2313,7 +2315,6 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
        if (xfer > 0 && err >= 0)
                snd_pcm_update_state(substream, runtime);
        snd_pcm_stream_unlock_irq(substream);
-       mutex_unlock(&runtime->buffer_mutex);
        return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
 }
 EXPORT_SYMBOL(__snd_pcm_lib_xfer);
index 4866aed97aacc074b8a88e5ff4218ca59d5b7881..5588b6a1ee8bd0c7fe57388c755899066b312943 100644 (file)
@@ -433,7 +433,7 @@ int snd_pcm_format_set_silence(snd_pcm_format_t format, void *data, unsigned int
                return 0;
        width = pcm_formats[(INT)format].phys; /* physical width */
        pat = pcm_formats[(INT)format].silence;
-       if (! width)
+       if (!width || !pat)
                return -EINVAL;
        /* signed or 1 byte data */
        if (pcm_formats[(INT)format].signd == 1 || width <= 8) {
index 704fdc9ebf911a053397b6a92b5d1be77030ac23..4adaee62ef33329d2f3f4d967be9be7d9b26dbbd 100644 (file)
@@ -685,6 +685,24 @@ static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
        return 0;
 }
 
+/* acquire buffer_mutex; if it's in r/w operation, return -EBUSY, otherwise
+ * block the further r/w operations
+ */
+static int snd_pcm_buffer_access_lock(struct snd_pcm_runtime *runtime)
+{
+       if (!atomic_dec_unless_positive(&runtime->buffer_accessing))
+               return -EBUSY;
+       mutex_lock(&runtime->buffer_mutex);
+       return 0; /* keep buffer_mutex, unlocked by below */
+}
+
+/* release buffer_mutex and clear r/w access flag */
+static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime)
+{
+       mutex_unlock(&runtime->buffer_mutex);
+       atomic_inc(&runtime->buffer_accessing);
+}
+
 #if IS_ENABLED(CONFIG_SND_PCM_OSS)
 #define is_oss_stream(substream)       ((substream)->oss.oss)
 #else
@@ -695,14 +713,16 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
                             struct snd_pcm_hw_params *params)
 {
        struct snd_pcm_runtime *runtime;
-       int err = 0, usecs;
+       int err, usecs;
        unsigned int bits;
        snd_pcm_uframes_t frames;
 
        if (PCM_RUNTIME_CHECK(substream))
                return -ENXIO;
        runtime = substream->runtime;
-       mutex_lock(&runtime->buffer_mutex);
+       err = snd_pcm_buffer_access_lock(runtime);
+       if (err < 0)
+               return err;
        snd_pcm_stream_lock_irq(substream);
        switch (runtime->status->state) {
        case SNDRV_PCM_STATE_OPEN:
@@ -820,7 +840,7 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
                        snd_pcm_lib_free_pages(substream);
        }
  unlock:
-       mutex_unlock(&runtime->buffer_mutex);
+       snd_pcm_buffer_access_unlock(runtime);
        return err;
 }
 
@@ -865,7 +885,9 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
        if (PCM_RUNTIME_CHECK(substream))
                return -ENXIO;
        runtime = substream->runtime;
-       mutex_lock(&runtime->buffer_mutex);
+       result = snd_pcm_buffer_access_lock(runtime);
+       if (result < 0)
+               return result;
        snd_pcm_stream_lock_irq(substream);
        switch (runtime->status->state) {
        case SNDRV_PCM_STATE_SETUP:
@@ -884,7 +906,7 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
        snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
        cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
  unlock:
-       mutex_unlock(&runtime->buffer_mutex);
+       snd_pcm_buffer_access_unlock(runtime);
        return result;
 }
 
@@ -1369,12 +1391,15 @@ static int snd_pcm_action_nonatomic(const struct action_ops *ops,
 
        /* Guarantee the group members won't change during non-atomic action */
        down_read(&snd_pcm_link_rwsem);
-       mutex_lock(&substream->runtime->buffer_mutex);
+       res = snd_pcm_buffer_access_lock(substream->runtime);
+       if (res < 0)
+               goto unlock;
        if (snd_pcm_stream_linked(substream))
                res = snd_pcm_action_group(ops, substream, state, false);
        else
                res = snd_pcm_action_single(ops, substream, state);
-       mutex_unlock(&substream->runtime->buffer_mutex);
+       snd_pcm_buffer_access_unlock(substream->runtime);
+ unlock:
        up_read(&snd_pcm_link_rwsem);
        return res;
 }
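
Note: buffer_accessing is a tiny asymmetric gate rather than a sleeping lock: transfers take the positive side with atomic_inc_unless_negative(), while hw_params/hw_free and the nonatomic actions claim the single negative slot with atomic_dec_unless_positive(); whoever loses gets -EBUSY immediately. The same gate modeled with C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* >= 0: number of transfers in flight; -1: exclusive side held */
    static bool inc_unless_negative(atomic_int *v)
    {
            int c = atomic_load(v);

            while (c >= 0)
                    if (atomic_compare_exchange_weak(v, &c, c + 1))
                            return true;
            return false; /* exclusive side held: -EBUSY */
    }

    static bool dec_unless_positive(atomic_int *v)
    {
            int c = atomic_load(v);

            while (c <= 0)
                    if (atomic_compare_exchange_weak(v, &c, c - 1))
                            return true;
            return false; /* a transfer is in flight: -EBUSY */
    }

    int main(void)
    {
            atomic_int accessing = 0;

            printf("xfer enter: %d\n", inc_unless_negative(&accessing));
            printf("hw_params:  %d\n", dec_unless_positive(&accessing));
            atomic_fetch_sub(&accessing, 1); /* transfer finished */
            printf("hw_params:  %d\n", dec_unless_positive(&accessing));
            return 0;
    }

Expected output is 1, 0, 1: the exclusive side fails while a transfer is active and succeeds once the counter is back to zero.
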
index 11235baaf6fa520f188d3ffcab470e7c020bb5b0..f212f233ea618efffb6ebe1d0395aaef80bb651e 100644 (file)
@@ -693,8 +693,6 @@ static int snd_mtpav_probe(struct platform_device *dev)
        mtp_card->outmidihwport = 0xffffffff;
        timer_setup(&mtp_card->timer, snd_mtpav_output_timer, 0);
 
-       card->private_free = snd_mtpav_free;
-
        err = snd_mtpav_get_RAWMIDI(mtp_card);
        if (err < 0)
                return err;
@@ -716,6 +714,8 @@ static int snd_mtpav_probe(struct platform_device *dev)
        if (err < 0)
                return err;
 
+       card->private_free = snd_mtpav_free;
+
        platform_set_drvdata(dev, card);
        printk(KERN_INFO "Motu MidiTimePiece on parallel port irq: %d ioport: 0x%lx\n", irq, port);
        return 0;
index efe810af28c53ae819b4e46494c444061e1e9f61..48b8ed752b69c594f91c82be86c714a5deb5da46 100644 (file)
@@ -116,16 +116,25 @@ static int i915_component_master_match(struct device *dev, int subcomponent,
        return 0;
 }
 
-/* check whether intel graphics is present */
-static bool i915_gfx_present(void)
+/* check whether Intel graphics is present and reachable */
+static int i915_gfx_present(struct pci_dev *hdac_pci)
 {
-       static const struct pci_device_id ids[] = {
-               { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID),
-                 .class = PCI_BASE_CLASS_DISPLAY << 16,
-                 .class_mask = 0xff << 16 },
-               {}
-       };
-       return pci_dev_present(ids);
+       unsigned int class = PCI_BASE_CLASS_DISPLAY << 16;
+       struct pci_dev *display_dev = NULL;
+       bool match = false;
+
+       do {
+               display_dev = pci_get_class(class, display_dev);
+
+               if (display_dev && display_dev->vendor == PCI_VENDOR_ID_INTEL &&
+                   connectivity_check(display_dev, hdac_pci))
+                       match = true;
+
+               pci_dev_put(display_dev);
+
+       } while (!match && display_dev);
+
+       return match;
 }
 
 /**
@@ -145,7 +154,7 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
        struct drm_audio_component *acomp;
        int err;
 
-       if (!i915_gfx_present())
+       if (!i915_gfx_present(to_pci_dev(bus->dev)))
                return -ENODEV;
 
        err = snd_hdac_acomp_init(bus, NULL,
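
[Editor's note] Instead of pci_dev_present() over a static ID table, i915_gfx_present() now walks every display-class PCI device and additionally requires connectivity_check() to pass, presumably so an HDA controller only binds the i915 component for a graphics device it is actually wired to. The pci_get_class() idiom used in the loop manages refcounts itself: it drops the reference on the cursor passed in and takes one on the device it returns. A standalone sketch of that idiom:

	/* Generic pci_get_class() walk (sketch): the cursor needs no
	 * manual refcounting; only a device kept past the loop needs an
	 * explicit pci_dev_put(), and here we put it immediately because
	 * only a yes/no answer is needed.
	 */
	struct pci_dev *gpu = NULL;
	bool found = false;

	while ((gpu = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, gpu))) {
		if (gpu->vendor == PCI_VENDOR_ID_INTEL) {
			found = true;
			pci_dev_put(gpu);	/* release the final reference */
			break;
		}
	}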
index 70fd8b13938eddd0c588ea0615e7708f936a5ad7..8b0a16ba27d39dd641616b4ac60f5e3ba2cb221d 100644 (file)
@@ -390,22 +390,36 @@ static const struct config_entry config_table[] = {
 
 /* Alder Lake */
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_ALDERLAKE)
+       /* Alderlake-S */
        {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x7ad0,
        },
+       /* RaptorLake-S */
        {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
-               .device = 0x51c8,
+               .device = 0x7a50,
        },
+       /* Alderlake-P */
        {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
-               .device = 0x51cc,
+               .device = 0x51c8,
        },
        {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x51cd,
        },
+       /* Alderlake-PS */
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x51c9,
+       },
+       /* Alderlake-M */
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x51cc,
+       },
+       /* Alderlake-N */
        {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x54c8,
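
[Editor's note] This config_table hunk regroups the Alder Lake PCI IDs under per-SKU comments (0x51c8/0x51cd for -P, 0x51c9 for -PS, 0x51cc for -M, 0x54c8 for -N) and adds RaptorLake-S (0x7a50), which had been mislabeled under the -P IDs. Each entry maps one HDA controller PCI device ID to a driver-selection policy, so supporting a new SKU is one table line, e.g. (hypothetical ID):

	{
		/* SOF is chosen only if DMIC or SoundWire hardware exists */
		.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
		.device = 0x1234,	/* hypothetical PCI device ID */
	},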
index b6bdebd9ef27577618d94ee59433de9cdb777a08..10112e1bb25dc2373c77ae3ef0d120d328a91030 100644 (file)
@@ -494,7 +494,7 @@ static int snd_cs423x_pnpbios_detect(struct pnp_dev *pdev,
        static int dev;
        int err;
        struct snd_card *card;
-       struct pnp_dev *cdev;
+       struct pnp_dev *cdev, *iter;
        char cid[PNP_ID_LEN];
 
        if (pnp_device_is_isapnp(pdev))
@@ -510,9 +510,11 @@ static int snd_cs423x_pnpbios_detect(struct pnp_dev *pdev,
        strcpy(cid, pdev->id[0].id);
        cid[5] = '1';
        cdev = NULL;
-       list_for_each_entry(cdev, &(pdev->protocol->devices), protocol_list) {
-               if (!strcmp(cdev->id[0].id, cid))
+       list_for_each_entry(iter, &(pdev->protocol->devices), protocol_list) {
+               if (!strcmp(iter->id[0].id, cid)) {
+                       cdev = iter;
                        break;
+               }
        }
        err = snd_cs423x_card_new(&pdev->dev, dev, &card);
        if (err < 0)
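
[Editor's note] The cs423x change is an instance of the tree-wide list-iterator cleanup: after list_for_each_entry() terminates without hitting a break, the cursor is a container_of() computed from the list head itself and must not be dereferenced, so lookups keep the found element in a separate pointer. The general shape, with hypothetical types:

	/* Hypothetical lookup, illustrating the safe pattern. */
	struct item *found = NULL, *iter;

	list_for_each_entry(iter, &item_list, node) {
		if (item_matches(iter, key)) {
			found = iter;	/* remember the real entry */
			break;
		}
	}
	if (!found)
		return -ENOENT;		/* iter is NOT valid here */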
index ea001c80149ddd3cf3245878d97ac72af87eb86c..3164eb8510fa4c03121061e762b863e46629e3d1 100644 (file)
@@ -478,7 +478,7 @@ static void snd_galaxy_free(struct snd_card *card)
                galaxy_set_config(galaxy, galaxy->config);
 }
 
-static int snd_galaxy_probe(struct device *dev, unsigned int n)
+static int __snd_galaxy_probe(struct device *dev, unsigned int n)
 {
        struct snd_galaxy *galaxy;
        struct snd_wss *chip;
@@ -598,6 +598,11 @@ static int snd_galaxy_probe(struct device *dev, unsigned int n)
        return 0;
 }
 
+static int snd_galaxy_probe(struct device *dev, unsigned int n)
+{
+       return snd_card_free_on_error(dev, __snd_galaxy_probe(dev, n));
+}
+
 static struct isa_driver snd_galaxy_driver = {
        .match          = snd_galaxy_match,
        .probe          = snd_galaxy_probe,
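
[Editor's note] From here on, most hunks apply one of two recurring shapes from the same probe-error-path series. Drivers already on devres-managed cards gain a thin probe wrapper around a renamed __probe, funneling failures through snd_card_free_on_error() so a half-initialized card is released immediately rather than at devres teardown; drivers that still manage the card by hand (als300, aw2, cmipci, korg1212, lx6464es, hdsp, hdspm, rme9652 below) instead branch to a local error label that calls snd_card_free() directly, as shown in those hunks. Roughly, the helper does the following (hedged sketch; the in-tree version lives in sound/core/init.c and the devres lookup details are an assumption here):

	/* Sketch: a no-op on success; on failure, free the devres-managed
	 * card now and propagate the original error code.
	 */
	int snd_card_free_on_error(struct device *dev, int ret)
	{
		struct snd_card *card;

		if (!ret)
			return 0;
		card = devres_find(dev, __snd_card_release, NULL, NULL);
		if (card)
			snd_card_free(card);
		return ret;
	}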
index 26ab7ff8076845ee703b0cb202262777e71f0606..60398fced046b05f47aba4ad45538ccf4687a7e8 100644 (file)
@@ -537,7 +537,7 @@ static void snd_sc6000_free(struct snd_card *card)
                sc6000_setup_board(vport, 0);
 }
 
-static int snd_sc6000_probe(struct device *devptr, unsigned int dev)
+static int __snd_sc6000_probe(struct device *devptr, unsigned int dev)
 {
        static const int possible_irqs[] = { 5, 7, 9, 10, 11, -1 };
        static const int possible_dmas[] = { 1, 3, 0, -1 };
@@ -662,6 +662,11 @@ static int snd_sc6000_probe(struct device *devptr, unsigned int dev)
        return 0;
 }
 
+static int snd_sc6000_probe(struct device *devptr, unsigned int dev)
+{
+       return snd_card_free_on_error(devptr, __snd_sc6000_probe(devptr, dev));
+}
+
 static struct isa_driver snd_sc6000_driver = {
        .match          = snd_sc6000_match,
        .probe          = snd_sc6000_probe,
index c1c52b479da264904353d7a7532008885c7d4b12..ad8ce6a1c25c7db3439817d57a7cb55758fa8ab2 100644 (file)
@@ -88,11 +88,7 @@ static inline int ioctl_return(int __user *addr, int value)
      */
 
 extern int dmasound_init(void);
-#ifdef MODULE
 extern void dmasound_deinit(void);
-#else
-#define dmasound_deinit()      do { } while (0)
-#endif
 
 /* description of the set-up applies to either hard or soft settings */
 
@@ -114,9 +110,7 @@ typedef struct {
     void *(*dma_alloc)(unsigned int, gfp_t);
     void (*dma_free)(void *, unsigned int);
     int (*irqinit)(void);
-#ifdef MODULE
     void (*irqcleanup)(void);
-#endif
     void (*init)(void);
     void (*silence)(void);
     int (*setFormat)(int);
index 0c95828ac0b18fff309c3b611cfbc2bc81672f9d..9c48f3a9e3d1ae88460455ae7405e6f9f6f3ba0d 100644 (file)
@@ -206,12 +206,10 @@ module_param(writeBufSize, int, 0);
 
 MODULE_LICENSE("GPL");
 
-#ifdef MODULE
 static int sq_unit = -1;
 static int mixer_unit = -1;
 static int state_unit = -1;
 static int irq_installed;
-#endif /* MODULE */
 
 /* control over who can modify resources shared between play/record */
 static fmode_t shared_resource_owner;
@@ -391,9 +389,6 @@ static const struct file_operations mixer_fops =
 
 static void mixer_init(void)
 {
-#ifndef MODULE
-       int mixer_unit;
-#endif
        mixer_unit = register_sound_mixer(&mixer_fops, -1);
        if (mixer_unit < 0)
                return;
@@ -1171,9 +1166,6 @@ static const struct file_operations sq_fops =
 static int sq_init(void)
 {
        const struct file_operations *fops = &sq_fops;
-#ifndef MODULE
-       int sq_unit;
-#endif
 
        sq_unit = register_sound_dsp(fops, -1);
        if (sq_unit < 0) {
@@ -1366,9 +1358,6 @@ static const struct file_operations state_fops = {
 
 static int state_init(void)
 {
-#ifndef MODULE
-       int state_unit;
-#endif
        state_unit = register_sound_special(&state_fops, SND_DEV_STATUS);
        if (state_unit < 0)
                return state_unit ;
@@ -1386,10 +1375,9 @@ static int state_init(void)
 int dmasound_init(void)
 {
        int res ;
-#ifdef MODULE
+
        if (irq_installed)
                return -EBUSY;
-#endif
 
        /* Set up sound queue, /dev/audio and /dev/dsp. */
 
@@ -1408,9 +1396,7 @@ int dmasound_init(void)
                printk(KERN_ERR "DMA sound driver: Interrupt initialization failed\n");
                return -ENODEV;
        }
-#ifdef MODULE
        irq_installed = 1;
-#endif
 
        printk(KERN_INFO "%s DMA sound driver rev %03d installed\n",
                dmasound.mach.name, (DMASOUND_CORE_REVISION<<4) +
@@ -1424,8 +1410,6 @@ int dmasound_init(void)
        return 0;
 }
 
-#ifdef MODULE
-
 void dmasound_deinit(void)
 {
        if (irq_installed) {
@@ -1444,8 +1428,6 @@ void dmasound_deinit(void)
                unregister_sound_dsp(sq_unit);
 }
 
-#else /* !MODULE */
-
 static int dmasound_setup(char *str)
 {
        int ints[6], size;
@@ -1489,8 +1471,6 @@ static int dmasound_setup(char *str)
 
 __setup("dmasound=", dmasound_setup);
 
-#endif /* !MODULE */
-
     /*
      *  Conversion tables
      */
@@ -1577,9 +1557,7 @@ char dmasound_alaw2dma8[] = {
 
 EXPORT_SYMBOL(dmasound);
 EXPORT_SYMBOL(dmasound_init);
-#ifdef MODULE
 EXPORT_SYMBOL(dmasound_deinit);
-#endif
 EXPORT_SYMBOL(dmasound_write_sq);
 EXPORT_SYMBOL(dmasound_catchRadius);
 #ifdef HAS_8BIT_TABLES
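
[Editor's note] The dmasound hunks drop the #ifdef MODULE / #ifndef MODULE split entirely: the unit-number globals, irq_installed, the irqcleanup hook, and dmasound_deinit() now exist in both built-in and modular builds. Previously the built-in configuration declared function-local shadows of sq_unit/mixer_unit/state_unit, so (as far as the removed code shows) the registered unit numbers were discarded on return and could never be used for unregistration; making the globals unconditional removes that divergence and lets error paths call dmasound_deinit() in either build. What the old built-in build effectively compiled, as a condensed sketch:

	/* Old !MODULE build (sketch): the unit number lived in a local,
	 * so it was lost as soon as init returned.
	 */
	static void mixer_init(void)
	{
		int mixer_unit = register_sound_mixer(&mixer_fops, -1);

		if (mixer_unit < 0)
			return;
		/* unit number discarded here; nothing could unregister it */
	}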
index bba4dae8dcc70eb758bc239ec37381ffb31bba9f..50e30704bf6f9ef745c18dc990da6d6931573075 100644 (file)
@@ -844,8 +844,8 @@ snd_ad1889_create(struct snd_card *card, struct pci_dev *pci)
 }
 
 static int
-snd_ad1889_probe(struct pci_dev *pci,
-                const struct pci_device_id *pci_id)
+__snd_ad1889_probe(struct pci_dev *pci,
+                  const struct pci_device_id *pci_id)
 {
        int err;
        static int devno;
@@ -904,6 +904,12 @@ snd_ad1889_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_ad1889_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_ad1889_probe(pci, pci_id));
+}
+
 static const struct pci_device_id snd_ad1889_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_ANALOG_DEVICES, PCI_DEVICE_ID_AD1889JS) },
        { 0, },
index 92eb59db106de94805eb6b0cc5ba137611f3edd1..2378a39abaebec573a150184cf374d61636d7f7e 100644 (file)
@@ -2124,8 +2124,8 @@ static int snd_ali_create(struct snd_card *card,
        return 0;
 }
 
-static int snd_ali_probe(struct pci_dev *pci,
-                        const struct pci_device_id *pci_id)
+static int __snd_ali_probe(struct pci_dev *pci,
+                          const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct snd_ali *codec;
@@ -2170,6 +2170,12 @@ static int snd_ali_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_ali_probe(struct pci_dev *pci,
+                        const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_ali_probe(pci, pci_id));
+}
+
 static struct pci_driver ali5451_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_ali_ids,
index b86565dcdbe41fe3fb9a86ed00b080296ee5a8fd..c70aff0601205ede92ea2c97a2a999ed21ddbf42 100644 (file)
@@ -708,7 +708,7 @@ static int snd_als300_probe(struct pci_dev *pci,
 
        err = snd_als300_create(card, pci, chip_type);
        if (err < 0)
-               return err;
+               goto error;
 
        strcpy(card->driver, "ALS300");
        if (chip->chip_type == DEVICE_ALS300_PLUS)
@@ -723,11 +723,15 @@ static int snd_als300_probe(struct pci_dev *pci,
 
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
 
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver als300_driver = {
index 535eccd124bee3d1e4a5adb726a4fccad77cad4d..f33aeb692a112ae4e9a539b63792857c024e2f8c 100644 (file)
@@ -806,8 +806,8 @@ static void snd_card_als4000_free( struct snd_card *card )
        snd_als4000_free_gameport(acard);
 }
 
-static int snd_card_als4000_probe(struct pci_dev *pci,
-                                 const struct pci_device_id *pci_id)
+static int __snd_card_als4000_probe(struct pci_dev *pci,
+                                   const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -930,6 +930,12 @@ static int snd_card_als4000_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_card_als4000_probe(struct pci_dev *pci,
+                                 const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_card_als4000_probe(pci, pci_id));
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int snd_als4000_suspend(struct device *dev)
 {
index b8e035d5930d25055dcf0f234b57b7e23d683c61..43d01f1847ed7d3a8301104422bee1b3e2290ef0 100644 (file)
@@ -1572,8 +1572,8 @@ static int snd_atiixp_init(struct snd_card *card, struct pci_dev *pci)
 }
 
 
-static int snd_atiixp_probe(struct pci_dev *pci,
-                           const struct pci_device_id *pci_id)
+static int __snd_atiixp_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct atiixp *chip;
@@ -1623,6 +1623,12 @@ static int snd_atiixp_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_atiixp_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_atiixp_probe(pci, pci_id));
+}
+
 static struct pci_driver atiixp_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_atiixp_ids,
index 178dce8ef1e993c28bc0ca48642edaf7b1fbf865..8864c4c3c7e136b6fc7d6f61029ac481bff35743 100644 (file)
@@ -1201,8 +1201,8 @@ static int snd_atiixp_init(struct snd_card *card, struct pci_dev *pci)
 }
 
 
-static int snd_atiixp_probe(struct pci_dev *pci,
-                           const struct pci_device_id *pci_id)
+static int __snd_atiixp_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct atiixp_modem *chip;
@@ -1247,6 +1247,12 @@ static int snd_atiixp_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_atiixp_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_atiixp_probe(pci, pci_id));
+}
+
 static struct pci_driver atiixp_modem_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_atiixp_ids,
index 342ef2a6655e3e48ac0d213fffb92b97f0a5c287..eb234153691bc8c7a68a0015ebb87c4a7217dfd3 100644 (file)
@@ -193,7 +193,7 @@ snd_vortex_create(struct snd_card *card, struct pci_dev *pci)
 
 // constructor -- see "Constructor" sub-section
 static int
-snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -310,6 +310,12 @@ snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
        return 0;
 }
 
+static int
+snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_vortex_probe(pci, pci_id));
+}
+
 // pci_driver definition
 static struct pci_driver vortex_driver = {
        .name = KBUILD_MODNAME,
index d56f126d6fdd92a51f58eddd83114e8fea61e517..29a4bcdec237a9e9da64ab1e95159ae8a3f6a77b 100644 (file)
@@ -275,7 +275,7 @@ static int snd_aw2_probe(struct pci_dev *pci,
        /* (3) Create main component */
        err = snd_aw2_create(card, pci);
        if (err < 0)
-               return err;
+               goto error;
 
        /* initialize mutex */
        mutex_init(&chip->mtx);
@@ -294,13 +294,17 @@ static int snd_aw2_probe(struct pci_dev *pci,
        /* (6) Register card instance */
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
 
        /* (7) Set PCI driver data */
        pci_set_drvdata(pci, card);
 
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 /* open callback */
index 089050470ff275dc129f0bfe6d28130f639ea6a0..7f329dfc5404a7089caad847e60cf398048a3097 100644 (file)
@@ -2427,7 +2427,7 @@ snd_azf3328_create(struct snd_card *card,
 }
 
 static int
-snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2520,6 +2520,12 @@ snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
        return 0;
 }
 
+static int
+snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_azf3328_probe(pci, pci_id));
+}
+
 #ifdef CONFIG_PM_SLEEP
 static inline void
 snd_azf3328_suspend_regs(const struct snd_azf3328 *chip,
index d23f931638410cc8cf75dd070878745ab3eafe32..621985bfee5d7de0244550ea7c4585bba554b4d3 100644 (file)
@@ -805,8 +805,8 @@ static int snd_bt87x_detect_card(struct pci_dev *pci)
        return SND_BT87X_BOARD_UNKNOWN;
 }
 
-static int snd_bt87x_probe(struct pci_dev *pci,
-                          const struct pci_device_id *pci_id)
+static int __snd_bt87x_probe(struct pci_dev *pci,
+                            const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -889,6 +889,12 @@ static int snd_bt87x_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_bt87x_probe(struct pci_dev *pci,
+                          const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_bt87x_probe(pci, pci_id));
+}
+
 /* default entries for all Bt87x cards - it's not exported */
 /* driver_data is set to 0 to call detection */
 static const struct pci_device_id snd_bt87x_default_ids[] = {
index 8577f9fa5ea6e1a293aa547e6ec801d23d3f6113..cf1bac7a435f1a29f846c5f506cbd54a764b8335 100644 (file)
@@ -1725,8 +1725,8 @@ static int snd_ca0106_midi(struct snd_ca0106 *chip, unsigned int channel)
 }
 
 
-static int snd_ca0106_probe(struct pci_dev *pci,
-                                       const struct pci_device_id *pci_id)
+static int __snd_ca0106_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -1786,6 +1786,12 @@ static int snd_ca0106_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_ca0106_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_ca0106_probe(pci, pci_id));
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int snd_ca0106_suspend(struct device *dev)
 {
index dab801d9d3b481f2b86af757b3b45db9c3ede283..727db6d4339161912e446a7181c6c55313d414e2 100644 (file)
@@ -3247,15 +3247,19 @@ static int snd_cmipci_probe(struct pci_dev *pci,
 
        err = snd_cmipci_create(card, pci, dev);
        if (err < 0)
-               return err;
+               goto error;
 
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
 
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 #ifdef CONFIG_PM_SLEEP
index e7367402b84a34ff1d79cb41d5fecd78ceb10a14..0c9cadf7b3b802f99ad9d820f3e667c0952c41ab 100644 (file)
@@ -1827,8 +1827,8 @@ static void snd_cs4281_opl3_command(struct snd_opl3 *opl3, unsigned short cmd,
        spin_unlock_irqrestore(&opl3->reg_lock, flags);
 }
 
-static int snd_cs4281_probe(struct pci_dev *pci,
-                           const struct pci_device_id *pci_id)
+static int __snd_cs4281_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -1888,6 +1888,12 @@ static int snd_cs4281_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_cs4281_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_cs4281_probe(pci, pci_id));
+}
+
 /*
  * Power Management
  */
index 499fa0148f9a42508e484b7e65ba9592ef514e29..440b8f9b40c964b2c5c9cc771b3d492318a463cf 100644 (file)
@@ -281,8 +281,8 @@ static int snd_cs5535audio_create(struct snd_card *card,
        return 0;
 }
 
-static int snd_cs5535audio_probe(struct pci_dev *pci,
-                                const struct pci_device_id *pci_id)
+static int __snd_cs5535audio_probe(struct pci_dev *pci,
+                                  const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -331,6 +331,12 @@ static int snd_cs5535audio_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_cs5535audio_probe(struct pci_dev *pci,
+                                const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_cs5535audio_probe(pci, pci_id));
+}
+
 static struct pci_driver cs5535audio_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_cs5535audio_ids,
index 25b012ef5c3e680f720c7da345e292c31ea5c566..c70c3ac4e99a530d847f240c178d58dc4138a286 100644 (file)
@@ -1970,8 +1970,8 @@ static int snd_echo_create(struct snd_card *card,
 }
 
 /* constructor */
-static int snd_echo_probe(struct pci_dev *pci,
-                         const struct pci_device_id *pci_id)
+static int __snd_echo_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2139,6 +2139,11 @@ static int snd_echo_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_echo_probe(struct pci_dev *pci,
+                         const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_echo_probe(pci, pci_id));
+}
 
 
 #if defined(CONFIG_PM_SLEEP)
index c49c44dc10820e67efb8628ed0f2727f17c1c3c8..89043392f3ec73796eaedd612891a8227192aa5c 100644 (file)
@@ -1491,8 +1491,8 @@ static int snd_emu10k1x_midi(struct emu10k1x *emu)
        return 0;
 }
 
-static int snd_emu10k1x_probe(struct pci_dev *pci,
-                             const struct pci_device_id *pci_id)
+static int __snd_emu10k1x_probe(struct pci_dev *pci,
+                               const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -1554,6 +1554,12 @@ static int snd_emu10k1x_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_emu10k1x_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_emu10k1x_probe(pci, pci_id));
+}
+
 // PCI IDs
 static const struct pci_device_id snd_emu10k1x_ids[] = {
        { PCI_VDEVICE(CREATIVE, 0x0006), 0 },   /* Dell OEM version (EMU10K1) */
index 2651f0c64c062ba72ad8f8a9f3b8ef4ba3875415..94efe347a97a986f3d5f175bcb3f320278e6f284 100644 (file)
@@ -2304,8 +2304,8 @@ static irqreturn_t snd_audiopci_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static int snd_audiopci_probe(struct pci_dev *pci,
-                             const struct pci_device_id *pci_id)
+static int __snd_audiopci_probe(struct pci_dev *pci,
+                               const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2369,6 +2369,12 @@ static int snd_audiopci_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_audiopci_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_audiopci_probe(pci, pci_id));
+}
+
 static struct pci_driver ens137x_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_audiopci_ids,
index 00b976f42a3dbdb5857b41b1aa2337fd0a02ab1e..e34ec6f89e7e00449790c9d3b5a0e52f985215d3 100644 (file)
@@ -1716,8 +1716,8 @@ static int snd_es1938_mixer(struct es1938 *chip)
 }
        
 
-static int snd_es1938_probe(struct pci_dev *pci,
-                           const struct pci_device_id *pci_id)
+static int __snd_es1938_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -1796,6 +1796,12 @@ static int snd_es1938_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_es1938_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_es1938_probe(pci, pci_id));
+}
+
 static struct pci_driver es1938_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_es1938_ids,
index 6a8a02a9ecf41d494913450bacf47f6693ba1296..4a7e20bb11bcae4d7d1fdeccd62f42bea450474a 100644 (file)
@@ -2741,8 +2741,8 @@ static int snd_es1968_create(struct snd_card *card,
 
 /*
  */
-static int snd_es1968_probe(struct pci_dev *pci,
-                           const struct pci_device_id *pci_id)
+static int __snd_es1968_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2848,6 +2848,12 @@ static int snd_es1968_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_es1968_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_es1968_probe(pci, pci_id));
+}
+
 static struct pci_driver es1968_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_es1968_ids,
index 9c22ff19e56d26aeef5001b46e2383e388c707a2..62b3cb126c6d01291ab3ea811855c9410747f0df 100644 (file)
@@ -1268,8 +1268,8 @@ static int snd_fm801_create(struct snd_card *card,
        return 0;
 }
 
-static int snd_card_fm801_probe(struct pci_dev *pci,
-                               const struct pci_device_id *pci_id)
+static int __snd_card_fm801_probe(struct pci_dev *pci,
+                                 const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -1333,6 +1333,12 @@ static int snd_card_fm801_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_card_fm801_probe(struct pci_dev *pci,
+                               const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_card_fm801_probe(pci, pci_id));
+}
+
 #ifdef CONFIG_PM_SLEEP
 static const unsigned char saved_regs[] = {
        FM801_PCM_VOL, FM801_I2S_VOL, FM801_FM_VOL, FM801_REC_SRC,
index 2d1fa706327b85291524fe8c13a7e1ed6bd26687..74c50ec040d9f2dfc3874bf918c8061cfef45e66 100644 (file)
@@ -478,28 +478,29 @@ const struct snd_pci_quirk cs8409_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0A29, "Bullseye", CS8409_BULLSEYE),
        SND_PCI_QUIRK(0x1028, 0x0A2A, "Bullseye", CS8409_BULLSEYE),
        SND_PCI_QUIRK(0x1028, 0x0A2B, "Bullseye", CS8409_BULLSEYE),
+       SND_PCI_QUIRK(0x1028, 0x0A77, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A78, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A79, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A7A, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A7D, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A7E, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A7F, "Cyborg", CS8409_CYBORG),
+       SND_PCI_QUIRK(0x1028, 0x0A80, "Cyborg", CS8409_CYBORG),
        SND_PCI_QUIRK(0x1028, 0x0AB0, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0AB2, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0AB1, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0AB3, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0AB4, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0AB5, "Warlock", CS8409_WARLOCK),
+       SND_PCI_QUIRK(0x1028, 0x0ACF, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0AD0, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0AD1, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0AD2, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0AD3, "Dolphin", CS8409_DOLPHIN),
        SND_PCI_QUIRK(0x1028, 0x0AD9, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0ADA, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0ADB, "Warlock", CS8409_WARLOCK),
        SND_PCI_QUIRK(0x1028, 0x0ADC, "Warlock", CS8409_WARLOCK),
-       SND_PCI_QUIRK(0x1028, 0x0AF4, "Warlock", CS8409_WARLOCK),
-       SND_PCI_QUIRK(0x1028, 0x0AF5, "Warlock", CS8409_WARLOCK),
-       SND_PCI_QUIRK(0x1028, 0x0BB5, "Warlock N3 15 TGL-U Nuvoton EC", CS8409_WARLOCK),
-       SND_PCI_QUIRK(0x1028, 0x0BB6, "Warlock V3 15 TGL-U Nuvoton EC", CS8409_WARLOCK),
-       SND_PCI_QUIRK(0x1028, 0x0A77, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A78, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A79, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A7A, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A7D, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A7E, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A7F, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0A80, "Cyborg", CS8409_CYBORG),
        SND_PCI_QUIRK(0x1028, 0x0ADF, "Cyborg", CS8409_CYBORG),
        SND_PCI_QUIRK(0x1028, 0x0AE0, "Cyborg", CS8409_CYBORG),
        SND_PCI_QUIRK(0x1028, 0x0AE1, "Cyborg", CS8409_CYBORG),
@@ -512,11 +513,30 @@ const struct snd_pci_quirk cs8409_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0AEE, "Cyborg", CS8409_CYBORG),
        SND_PCI_QUIRK(0x1028, 0x0AEF, "Cyborg", CS8409_CYBORG),
        SND_PCI_QUIRK(0x1028, 0x0AF0, "Cyborg", CS8409_CYBORG),
-       SND_PCI_QUIRK(0x1028, 0x0AD0, "Dolphin", CS8409_DOLPHIN),
-       SND_PCI_QUIRK(0x1028, 0x0AD1, "Dolphin", CS8409_DOLPHIN),
-       SND_PCI_QUIRK(0x1028, 0x0AD2, "Dolphin", CS8409_DOLPHIN),
-       SND_PCI_QUIRK(0x1028, 0x0AD3, "Dolphin", CS8409_DOLPHIN),
-       SND_PCI_QUIRK(0x1028, 0x0ACF, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0AF4, "Warlock", CS8409_WARLOCK),
+       SND_PCI_QUIRK(0x1028, 0x0AF5, "Warlock", CS8409_WARLOCK),
+       SND_PCI_QUIRK(0x1028, 0x0B92, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0B93, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0B94, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0B95, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0B96, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0B97, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0BB2, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0BB3, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0BB4, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0BB5, "Warlock N3 15 TGL-U Nuvoton EC", CS8409_WARLOCK),
+       SND_PCI_QUIRK(0x1028, 0x0BB6, "Warlock V3 15 TGL-U Nuvoton EC", CS8409_WARLOCK),
+       SND_PCI_QUIRK(0x1028, 0x0BB8, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0BB9, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0BBA, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0BBB, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0BBC, "Warlock MLK", CS8409_WARLOCK_MLK),
+       SND_PCI_QUIRK(0x1028, 0x0BBD, "Warlock MLK Dual Mic", CS8409_WARLOCK_MLK_DUAL_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0BD4, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0BD5, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0BD6, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0BD7, "Dolphin", CS8409_DOLPHIN),
+       SND_PCI_QUIRK(0x1028, 0x0BD8, "Dolphin", CS8409_DOLPHIN),
        {} /* terminator */
 };
 
@@ -524,6 +544,8 @@ const struct snd_pci_quirk cs8409_fixup_tbl[] = {
 const struct hda_model_fixup cs8409_models[] = {
        { .id = CS8409_BULLSEYE, .name = "bullseye" },
        { .id = CS8409_WARLOCK, .name = "warlock" },
+       { .id = CS8409_WARLOCK_MLK, .name = "warlock mlk" },
+       { .id = CS8409_WARLOCK_MLK_DUAL_MIC, .name = "warlock mlk dual mic" },
        { .id = CS8409_CYBORG, .name = "cyborg" },
        { .id = CS8409_DOLPHIN, .name = "dolphin" },
        {}
@@ -542,6 +564,18 @@ const struct hda_fixup cs8409_fixups[] = {
                .chained = true,
                .chain_id = CS8409_FIXUPS,
        },
+       [CS8409_WARLOCK_MLK] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = cs8409_cs42l42_pincfgs,
+               .chained = true,
+               .chain_id = CS8409_FIXUPS,
+       },
+       [CS8409_WARLOCK_MLK_DUAL_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = cs8409_cs42l42_pincfgs,
+               .chained = true,
+               .chain_id = CS8409_FIXUPS,
+       },
        [CS8409_CYBORG] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = cs8409_cs42l42_pincfgs,
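
[Editor's note] The quirk-table hunks above re-sort the Dell subsystem IDs into ascending order and introduce two new Warlock MLK variants, each with its own hda_fixup entry. Note the chaining convention every entry here uses: a chained fixup applies its own pin configuration first, then follows .chain_id into the shared CS8409_FIXUPS stage, so common setup lives in one place. Schematically (hypothetical model name):

	[FOO_MODEL] = {
		.type = HDA_FIXUP_PINS,
		.v.pins = cs8409_cs42l42_pincfgs,	/* model pin overrides */
		.chained = true,
		.chain_id = CS8409_FIXUPS,		/* then the common setup */
	},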
index aff2b5abb81ea4f1e030f79157d38f0bcfbe0d94..343fabc4387da8630f87d2aa381c874220aacb9e 100644 (file)
@@ -733,6 +733,7 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
                { 0x130A, 0x00 },
                { 0x130F, 0x00 },
        };
+       int fsv_old, fsv_new;
 
        /* Bring CS42L42 out of Reset */
        gpio_data = snd_hda_codec_read(codec, CS8409_PIN_AFG, 0, AC_VERB_GET_GPIO_DATA, 0);
@@ -749,8 +750,13 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
        /* Clear interrupts, by reading interrupt status registers */
        cs8409_i2c_bulk_read(cs42l42, irq_regs, ARRAY_SIZE(irq_regs));
 
-       if (cs42l42->full_scale_vol)
-               cs8409_i2c_write(cs42l42, 0x2001, 0x01);
+       fsv_old = cs8409_i2c_read(cs42l42, 0x2001);
+       if (cs42l42->full_scale_vol == CS42L42_FULL_SCALE_VOL_0DB)
+               fsv_new = fsv_old & ~CS42L42_FULL_SCALE_VOL_MASK;
+       else
+               fsv_new = fsv_old & CS42L42_FULL_SCALE_VOL_MASK;
+       if (fsv_new != fsv_old)
+               cs8409_i2c_write(cs42l42, 0x2001, fsv_new);
 
        /* we have to explicitly allow unsol event handling even during the
         * resume phase so that the jack event is processed properly
@@ -906,9 +912,15 @@ static void cs8409_cs42l42_hw_init(struct hda_codec *codec)
                        cs8409_vendor_coef_set(codec, seq_bullseye->cir, seq_bullseye->coeff);
        }
 
-       /* DMIC1_MO=00b, DMIC1/2_SR=1 */
-       if (codec->fixup_id == CS8409_WARLOCK || codec->fixup_id == CS8409_CYBORG)
-               cs8409_vendor_coef_set(codec, 0x09, 0x0003);
+       switch (codec->fixup_id) {
+       case CS8409_CYBORG:
+       case CS8409_WARLOCK_MLK_DUAL_MIC:
+               /* DMIC1_MO=00b, DMIC1/2_SR=1 */
+               cs8409_vendor_coef_set(codec, CS8409_DMIC_CFG, 0x0003);
+               break;
+       default:
+               break;
+       }
 
        cs42l42_resume(cs42l42);
 
@@ -993,25 +1005,17 @@ void cs8409_cs42l42_fixups(struct hda_codec *codec, const struct hda_fixup *fix,
                cs8409_fix_caps(codec, CS8409_CS42L42_HP_PIN_NID);
                cs8409_fix_caps(codec, CS8409_CS42L42_AMIC_PIN_NID);
 
-               /* Set TIP_SENSE_EN for analog front-end of tip sense.
-                * Additionally set HSBIAS_SENSE_EN and Full Scale volume for some variants.
-                */
+               /* Set HSBIAS_SENSE_EN and Full Scale volume for some variants. */
                switch (codec->fixup_id) {
-               case CS8409_WARLOCK:
+               case CS8409_WARLOCK_MLK:
+               case CS8409_WARLOCK_MLK_DUAL_MIC:
                        spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x0020;
-                       spec->scodecs[CS8409_CODEC0]->full_scale_vol = 1;
-                       break;
-               case CS8409_BULLSEYE:
-                       spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x0020;
-                       spec->scodecs[CS8409_CODEC0]->full_scale_vol = 0;
-                       break;
-               case CS8409_CYBORG:
-                       spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x00a0;
-                       spec->scodecs[CS8409_CODEC0]->full_scale_vol = 1;
+                       spec->scodecs[CS8409_CODEC0]->full_scale_vol = CS42L42_FULL_SCALE_VOL_0DB;
                        break;
                default:
-                       spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x0003;
-                       spec->scodecs[CS8409_CODEC0]->full_scale_vol = 1;
+                       spec->scodecs[CS8409_CODEC0]->hsbias_hiz = 0x0020;
+                       spec->scodecs[CS8409_CODEC0]->full_scale_vol =
+                               CS42L42_FULL_SCALE_VOL_MINUS6DB;
                        break;
                }
 
@@ -1222,6 +1226,9 @@ void dolphin_fixups(struct hda_codec *codec, const struct hda_fixup *fix, int ac
                cs8409_fix_caps(codec, DOLPHIN_LO_PIN_NID);
                cs8409_fix_caps(codec, DOLPHIN_AMIC_PIN_NID);
 
+               spec->scodecs[CS8409_CODEC0]->full_scale_vol = CS42L42_FULL_SCALE_VOL_MINUS6DB;
+               spec->scodecs[CS8409_CODEC1]->full_scale_vol = CS42L42_FULL_SCALE_VOL_MINUS6DB;
+
                break;
        case HDA_FIXUP_ACT_PROBE:
                /* Fix Sample Rate to 48kHz */
index d0b725c7285b60a4c9de67a1f5c845b26580a54f..7df46bd8d2dae0e8579a5e288c222c074324e038 100644 (file)
@@ -235,6 +235,9 @@ enum cs8409_coefficient_index_registers {
 #define CS42L42_I2C_SLEEP_US                   (2000)
 #define CS42L42_PDN_TIMEOUT_US                 (250000)
 #define CS42L42_PDN_SLEEP_US                   (2000)
+#define CS42L42_FULL_SCALE_VOL_MASK            (2)
+#define CS42L42_FULL_SCALE_VOL_0DB             (1)
+#define CS42L42_FULL_SCALE_VOL_MINUS6DB                (0)
 
 /* Dell BULLSEYE / WARLOCK / CYBORG Specific Definitions */
 
@@ -264,6 +267,8 @@ enum cs8409_coefficient_index_registers {
 enum {
        CS8409_BULLSEYE,
        CS8409_WARLOCK,
+       CS8409_WARLOCK_MLK,
+       CS8409_WARLOCK_MLK_DUAL_MIC,
        CS8409_CYBORG,
        CS8409_FIXUPS,
        CS8409_DOLPHIN,
index c85ed7bc121e6ac8a81b65a216490a11a69a6b58..3e086eebf88d007b508e1aee2746ceebaf2bfd18 100644 (file)
@@ -1625,6 +1625,7 @@ static void hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
        struct hda_codec *codec = per_pin->codec;
        struct hdmi_spec *spec = codec->spec;
        struct hdmi_eld *eld = &spec->temp_eld;
+       struct device *dev = hda_codec_dev(codec);
        hda_nid_t pin_nid = per_pin->pin_nid;
        int dev_id = per_pin->dev_id;
        /*
@@ -1638,8 +1639,13 @@ static void hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
        int present;
        int ret;
 
+#ifdef CONFIG_PM
+       if (dev->power.runtime_status == RPM_SUSPENDING)
+               return;
+#endif
+
        ret = snd_hda_power_up_pm(codec);
-       if (ret < 0 && pm_runtime_suspended(hda_codec_dev(codec)))
+       if (ret < 0 && pm_runtime_suspended(dev))
                goto out;
 
        present = snd_hda_jack_pin_sense(codec, pin_nid, dev_id);
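
[Editor's note] hdmi_present_sense_via_verbs() now bails out early when the codec device is in the middle of a runtime suspend, so a hotplug notification cannot race the suspend path; dev->power.runtime_status only exists under CONFIG_PM, hence the #ifdef. If the check were wanted in more than one place, a hypothetical wrapper (not in the tree) could look like:

	/* Hypothetical helper, for illustration only. */
	static bool dev_is_runtime_suspending(struct device *dev)
	{
	#ifdef CONFIG_PM
		return dev->power.runtime_status == RPM_SUSPENDING;
	#else
		return false;
	#endif
	}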
index c78f16944f431b253cd0a3db776b8ed4b90e36c9..62fbf3772b4123db475cea1a53797eb05bd87ff5 100644 (file)
@@ -2619,6 +2619,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x65f5, "Clevo PD50PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
@@ -3617,8 +3618,8 @@ static void alc256_shutup(struct hda_codec *codec)
        /* If disable 3k pulldown control for alc257, the Mic detection will not work correctly
         * when booting with headset plugged. So skip setting it for the codec alc257
         */
-       if (spec->codec_variant != ALC269_TYPE_ALC257 &&
-           spec->codec_variant != ALC269_TYPE_ALC256)
+       if (codec->core.vendor_id != 0x10ec0236 &&
+           codec->core.vendor_id != 0x10ec0257)
                alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
 
        if (!spec->no_shutup_pins)
@@ -7006,6 +7007,7 @@ enum {
        ALC287_FIXUP_LEGION_16ACHG6,
        ALC287_FIXUP_CS35L41_I2C_2,
        ALC245_FIXUP_CS35L41_SPI_2,
+       ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED,
        ALC245_FIXUP_CS35L41_SPI_4,
        ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED,
        ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED,
@@ -8771,6 +8773,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cs35l41_fixup_spi_two,
        },
+       [ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cs35l41_fixup_spi_two,
+               .chained = true,
+               .chain_id = ALC285_FIXUP_HP_GPIO_LED,
+       },
        [ALC245_FIXUP_CS35L41_SPI_4] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cs35l41_fixup_spi_four,
@@ -9026,7 +9034,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x89ac, "HP EliteBook 640 G9", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89ae, "HP EliteBook 650 G9", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89c3, "Zbook Studio G9", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
-       SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
@@ -9257,6 +9265,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x505d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x505f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x5062, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x508b, "Thinkpad X12 Gen 1", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
        SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
@@ -11140,6 +11149,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
        SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
+       SND_PCI_QUIRK(0x17aa, 0x1057, "Lenovo P360", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
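
[Editor's note] Besides the new Clevo, HP, Thinkpad, and Lenovo quirk rows, the functional change in alc256_shutup() narrows the 3k-pulldown exclusion from codec_variant to explicit vendor IDs. The variant test also covered plain ALC256 (0x10ec0256), which apparently should still receive the coef update; only the exact codecs 0x10ec0236 and 0x10ec0257 need to skip it. Condensed into a hypothetical predicate:

	/* Hypothetical helper mirroring the new condition (illustrative). */
	static bool alc_is_236_or_257(struct hda_codec *codec)
	{
		return codec->core.vendor_id == 0x10ec0236 ||
		       codec->core.vendor_id == 0x10ec0257;
	}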
index f6275868877a751b83d6622a024f70f4e3f12ef2..6fab2ad85bbec2bcfcd11e4b2b70dd5df08836fc 100644 (file)
@@ -2519,8 +2519,8 @@ static int snd_vt1724_create(struct snd_card *card,
  *
  */
 
-static int snd_vt1724_probe(struct pci_dev *pci,
-                           const struct pci_device_id *pci_id)
+static int __snd_vt1724_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2662,6 +2662,12 @@ static int snd_vt1724_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_vt1724_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_vt1724_probe(pci, pci_id));
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int snd_vt1724_suspend(struct device *dev)
 {
index a51032b3ac4d85c7483fe514381c3155da461564..ae285c0a629c82a518af20812d681526b4e61a73 100644 (file)
@@ -3109,8 +3109,8 @@ static int check_default_spdif_aclink(struct pci_dev *pci)
        return 0;
 }
 
-static int snd_intel8x0_probe(struct pci_dev *pci,
-                             const struct pci_device_id *pci_id)
+static int __snd_intel8x0_probe(struct pci_dev *pci,
+                               const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct intel8x0 *chip;
@@ -3189,6 +3189,12 @@ static int snd_intel8x0_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_intel8x0_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_intel8x0_probe(pci, pci_id));
+}
+
 static struct pci_driver intel8x0_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_intel8x0_ids,
index 7de3cb2f17b525023f2ebd0d3c1456fe9bf949ee..2845cc006d0cfb8d13b6cf22044f5cd1a550b5dd 100644 (file)
@@ -1178,8 +1178,8 @@ static struct shortname_table {
        { 0 },
 };
 
-static int snd_intel8x0m_probe(struct pci_dev *pci,
-                              const struct pci_device_id *pci_id)
+static int __snd_intel8x0m_probe(struct pci_dev *pci,
+                                const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct intel8x0m *chip;
@@ -1225,6 +1225,12 @@ static int snd_intel8x0m_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_intel8x0m_probe(struct pci_dev *pci,
+                              const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_intel8x0m_probe(pci, pci_id));
+}
+
 static struct pci_driver intel8x0m_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_intel8x0m_ids,
index 5c9e240ff6a9c514658b4b905368e1857a7e980e..33b4f95d65b3fa91b9d12dec68a45ec87c0b52d5 100644 (file)
@@ -2355,7 +2355,7 @@ snd_korg1212_probe(struct pci_dev *pci,
 
        err = snd_korg1212_create(card, pci);
        if (err < 0)
-               return err;
+               goto error;
 
        strcpy(card->driver, "korg1212");
        strcpy(card->shortname, "korg1212");
@@ -2366,10 +2366,14 @@ snd_korg1212_probe(struct pci_dev *pci,
 
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver korg1212_driver = {
index 5269a1d396a5bfe48cec1f2d3be5a426c5c3ac03..1aa30e90b86a797f1a6f91c63bf1c9bb56bbcd4a 100644 (file)
@@ -637,8 +637,8 @@ static int lola_create(struct snd_card *card, struct pci_dev *pci, int dev)
        return 0;
 }
 
-static int lola_probe(struct pci_dev *pci,
-                     const struct pci_device_id *pci_id)
+static int __lola_probe(struct pci_dev *pci,
+                       const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -687,6 +687,12 @@ static int lola_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int lola_probe(struct pci_dev *pci,
+                     const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __lola_probe(pci, pci_id));
+}
+
 /* PCI IDs */
 static const struct pci_device_id lola_ids[] = {
        { PCI_VDEVICE(DIGIGRAM, 0x0001) },
index 168a1084f7303925980a01d5a14d16946a2ede3b..bd9b6148dd6fbe820e452e5369c632a3d83efda7 100644 (file)
@@ -1019,7 +1019,7 @@ static int snd_lx6464es_probe(struct pci_dev *pci,
        err = snd_lx6464es_create(card, pci);
        if (err < 0) {
                dev_err(card->dev, "error during snd_lx6464es_create\n");
-               return err;
+               goto error;
        }
 
        strcpy(card->driver, "LX6464ES");
@@ -1036,12 +1036,16 @@ static int snd_lx6464es_probe(struct pci_dev *pci,
 
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
 
        dev_dbg(chip->card->dev, "initialization successful\n");
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver lx6464es_driver = {
index 056838ead21d6f49ead0b71ac078efe4374b373f..261850775c8071f648f8cf8ed632d5734b858be9 100644 (file)
@@ -2637,7 +2637,7 @@ snd_m3_create(struct snd_card *card, struct pci_dev *pci,
 /*
  */
 static int
-snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2702,6 +2702,12 @@ snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
        return 0;
 }
 
+static int
+snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_m3_probe(pci, pci_id));
+}
+
 static struct pci_driver m3_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_m3_ids,
index c9c178504959ea1d382137a991700b3ddc740f74..f99a1e96e9231e82a6a2cdcae7c9a7ed340e8d86 100644 (file)
@@ -1573,7 +1573,6 @@ snd_nm256_create(struct snd_card *card, struct pci_dev *pci)
        chip->coeffs_current = 0;
 
        snd_nm256_init_chip(chip);
-       card->private_free = snd_nm256_free;
 
        // pci_set_master(pci); /* needed? */
        return 0;
@@ -1680,6 +1679,7 @@ static int snd_nm256_probe(struct pci_dev *pci,
        err = snd_card_register(card);
        if (err < 0)
                return err;
+       card->private_free = snd_nm256_free;
 
        pci_set_drvdata(pci, card);
        return 0;
index 4fb3f2484fdba92787c5dcab2a7c8b5b382a4ad6..92ffe9dc20c55699a0c95cc9b4b1ac2ba8c3f98d 100644 (file)
@@ -576,7 +576,7 @@ static void oxygen_card_free(struct snd_card *card)
        mutex_destroy(&chip->mutex);
 }
 
-int oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
+static int __oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
                     struct module *owner,
                     const struct pci_device_id *ids,
                     int (*get_model)(struct oxygen *chip,
@@ -701,6 +701,16 @@ int oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
        pci_set_drvdata(pci, card);
        return 0;
 }
+
+int oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
+                    struct module *owner,
+                    const struct pci_device_id *ids,
+                    int (*get_model)(struct oxygen *chip,
+                                     const struct pci_device_id *id))
+{
+       return snd_card_free_on_error(&pci->dev,
+                                     __oxygen_pci_probe(pci, index, id, owner, ids, get_model));
+}
 EXPORT_SYMBOL(oxygen_pci_probe);
 
 #ifdef CONFIG_PM_SLEEP
index 5a987c683c41c14685674803ac0f3079d206d3b3..b37c877c2c16057505fae4b206d74f96657c66f3 100644 (file)
@@ -2023,7 +2023,7 @@ static void snd_riptide_joystick_remove(struct pci_dev *pci)
 #endif
 
 static int
-snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2124,6 +2124,12 @@ snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
        return 0;
 }
 
+static int
+snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_card_riptide_probe(pci, pci_id));
+}
+
 static struct pci_driver driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_riptide_ids,
index 5b6bd9f0b2f77df56084e213f282f69020d351df..9c0ac025e14320387bd44e1c2fe7f4cbc102cf07 100644 (file)
@@ -1875,7 +1875,7 @@ static void snd_rme32_card_free(struct snd_card *card)
 }
 
 static int
-snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
 {
        static int dev;
        struct rme32 *rme32;
@@ -1927,6 +1927,12 @@ snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
        return 0;
 }
 
+static int
+snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_rme32_probe(pci, pci_id));
+}
+
 static struct pci_driver rme32_driver = {
        .name =         KBUILD_MODNAME,
        .id_table =     snd_rme32_ids,
index 8fc8115049203b5dbe65ed048d0466625d1593de..bccb7e0d3d116c9ec8e8cdef1d581a95573598a5 100644 (file)
@@ -2430,8 +2430,8 @@ static void snd_rme96_card_free(struct snd_card *card)
 }
 
 static int
-snd_rme96_probe(struct pci_dev *pci,
-               const struct pci_device_id *pci_id)
+__snd_rme96_probe(struct pci_dev *pci,
+                 const struct pci_device_id *pci_id)
 {
        static int dev;
        struct rme96 *rme96;
@@ -2498,6 +2498,12 @@ snd_rme96_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_rme96_probe(struct pci_dev *pci,
+                          const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_rme96_probe(pci, pci_id));
+}
+
 static struct pci_driver rme96_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_rme96_ids,
index 96c12dfb24cf9d00cbd98d498f95d4438096415b..3db641318d3ae4819cbf7cc10944b78cebf1bfd6 100644 (file)
@@ -5444,17 +5444,21 @@ static int snd_hdsp_probe(struct pci_dev *pci,
        hdsp->pci = pci;
        err = snd_hdsp_create(card, hdsp);
        if (err)
-               return err;
+               goto error;
 
        strcpy(card->shortname, "Hammerfall DSP");
        sprintf(card->longname, "%s at 0x%lx, irq %d", hdsp->card_name,
                hdsp->port, hdsp->irq);
        err = snd_card_register(card);
        if (err)
-               return err;
+               goto error;
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver hdsp_driver = {
index ff06ee82607cf784e8ebc1d0ef24baada4d6aa60..fa1812e7a49dca872a3325510d7649d24fdf1e54 100644 (file)
@@ -6895,7 +6895,7 @@ static int snd_hdspm_probe(struct pci_dev *pci,
 
        err = snd_hdspm_create(card, hdspm);
        if (err < 0)
-               return err;
+               goto error;
 
        if (hdspm->io_type != MADIface) {
                snprintf(card->shortname, sizeof(card->shortname), "%s_%x",
@@ -6914,12 +6914,16 @@ static int snd_hdspm_probe(struct pci_dev *pci,
 
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
 
        pci_set_drvdata(pci, card);
 
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver hdspm_driver = {
index 7755e19aa77617f140c259cc5b2ed942f39cfca0..1d614fe89a6ae12e17d02b5bbeb2e2351ffc6e27 100644 (file)
@@ -2572,7 +2572,7 @@ static int snd_rme9652_probe(struct pci_dev *pci,
        rme9652->pci = pci;
        err = snd_rme9652_create(card, rme9652, precise_ptr[dev]);
        if (err)
-               return err;
+               goto error;
 
        strcpy(card->shortname, rme9652->card_name);
 
@@ -2580,10 +2580,14 @@ static int snd_rme9652_probe(struct pci_dev *pci,
                card->shortname, rme9652->port, rme9652->irq);
        err = snd_card_register(card);
        if (err)
-               return err;
+               goto error;
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver rme9652_driver = {
index 0b722b0e0604bfc546121ab6b970458299928130..fabe393607f8fa2862db6b73e5247d7bd1f63ccf 100644 (file)
@@ -1331,8 +1331,8 @@ static int sis_chip_create(struct snd_card *card,
        return 0;
 }
 
-static int snd_sis7019_probe(struct pci_dev *pci,
-                            const struct pci_device_id *pci_id)
+static int __snd_sis7019_probe(struct pci_dev *pci,
+                              const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct sis7019 *sis;
@@ -1352,8 +1352,8 @@ static int snd_sis7019_probe(struct pci_dev *pci,
        if (!codecs)
                codecs = SIS_PRIMARY_CODEC_PRESENT;
 
-       rc = snd_card_new(&pci->dev, index, id, THIS_MODULE,
-                         sizeof(*sis), &card);
+       rc = snd_devm_card_new(&pci->dev, index, id, THIS_MODULE,
+                              sizeof(*sis), &card);
        if (rc < 0)
                return rc;
 
@@ -1386,6 +1386,12 @@ static int snd_sis7019_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_sis7019_probe(struct pci_dev *pci,
+                            const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_sis7019_probe(pci, pci_id));
+}
+
 static struct pci_driver sis7019_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_sis7019_ids,
index c8c49881008fd78b461df2dbbb1a28c5c1598ccc..f91cbf6eeca0f6ef151760ccde8b360d6aed8041 100644 (file)
@@ -1387,8 +1387,8 @@ static int snd_sonicvibes_midi(struct sonicvibes *sonic,
        return 0;
 }
 
-static int snd_sonic_probe(struct pci_dev *pci,
-                          const struct pci_device_id *pci_id)
+static int __snd_sonic_probe(struct pci_dev *pci,
+                            const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -1459,6 +1459,12 @@ static int snd_sonic_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_sonic_probe(struct pci_dev *pci,
+                          const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_sonic_probe(pci, pci_id));
+}
+
 static struct pci_driver sonicvibes_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_sonic_ids,
index 65514f7e42d7d2f00f855bc981f6129e168d8873..361b83fd721e61650e0697164e7061fe8b88b7cf 100644 (file)
@@ -2458,8 +2458,8 @@ static int check_dxs_list(struct pci_dev *pci, int revision)
        return VIA_DXS_48K;
 };
 
-static int snd_via82xx_probe(struct pci_dev *pci,
-                            const struct pci_device_id *pci_id)
+static int __snd_via82xx_probe(struct pci_dev *pci,
+                              const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct via82xx *chip;
@@ -2569,6 +2569,12 @@ static int snd_via82xx_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_via82xx_probe(struct pci_dev *pci,
+                            const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_via82xx_probe(pci, pci_id));
+}
+
 static struct pci_driver via82xx_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_via82xx_ids,
index 234f7fbed2364318ef73f0d61cf1b5a52f879c90..ca7f024bf8ec6efe5387d205512120cdbc8872aa 100644 (file)
@@ -1103,8 +1103,8 @@ static int snd_via82xx_create(struct snd_card *card,
 }
 
 
-static int snd_via82xx_probe(struct pci_dev *pci,
-                            const struct pci_device_id *pci_id)
+static int __snd_via82xx_probe(struct pci_dev *pci,
+                              const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct via82xx_modem *chip;
@@ -1157,6 +1157,12 @@ static int snd_via82xx_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_via82xx_probe(struct pci_dev *pci,
+                            const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_via82xx_probe(pci, pci_id));
+}
+
 static struct pci_driver via82xx_modem_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_via82xx_modem_ids,
index 9b263a9a669dcb91c2e04414ca8bf03db835fff5..4c7b5d940799b09ab54d12a2bd6b20e59b4a6003 100644 (file)
@@ -107,6 +107,7 @@ int mt6358_set_mtkaif_protocol(struct snd_soc_component *cmpnt,
        priv->mtkaif_protocol = mtkaif_protocol;
        return 0;
 }
+EXPORT_SYMBOL_GPL(mt6358_set_mtkaif_protocol);
 
 static void playback_gpio_set(struct mt6358_priv *priv)
 {
@@ -273,6 +274,7 @@ int mt6358_mtkaif_calibration_enable(struct snd_soc_component *cmpnt)
                           1 << RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_SFT);
        return 0;
 }
+EXPORT_SYMBOL_GPL(mt6358_mtkaif_calibration_enable);
 
 int mt6358_mtkaif_calibration_disable(struct snd_soc_component *cmpnt)
 {
@@ -296,6 +298,7 @@ int mt6358_mtkaif_calibration_disable(struct snd_soc_component *cmpnt)
        capture_gpio_reset(priv);
        return 0;
 }
+EXPORT_SYMBOL_GPL(mt6358_mtkaif_calibration_disable);
 
 int mt6358_set_mtkaif_calibration_phase(struct snd_soc_component *cmpnt,
                                        int phase_1, int phase_2)
@@ -310,6 +313,7 @@ int mt6358_set_mtkaif_calibration_phase(struct snd_soc_component *cmpnt,
                           phase_2 << RG_AUD_PAD_TOP_PHASE_MODE2_SFT);
        return 0;
 }
+EXPORT_SYMBOL_GPL(mt6358_set_mtkaif_calibration_phase);
 
 /* dl pga gain */
 enum {
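
The EXPORT_SYMBOL_GPL() additions matter because these mtkaif helpers are called from MediaTek machine drivers that can be built as separate modules; without the exports, such a build fails at modpost with undefined references. A hedged sketch of a caller from another module (the component name and init hook are hypothetical; the protocol constant is as declared in mt6358.h):

    #include <sound/soc.h>
    #include "mt6358.h"     /* declares the exported mtkaif helpers */

    static int example_rtd_init(struct snd_soc_pcm_runtime *rtd)
    {
            struct snd_soc_component *cmpnt =
                    snd_soc_rtdcom_lookup(rtd, "mt6358-sound");

            /* resolvable across modules only because the symbol is exported */
            return mt6358_set_mtkaif_protocol(cmpnt, MT6358_MTKAIF_PROTOCOL_1);
    }
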
index 370bc790c6ba7f5b71ee13ba7262f29b7b6a734f..d9a0d4768c4d5627358a987544d405aae7940434 100644 (file)
@@ -462,11 +462,9 @@ static int hp_jack_event(struct notifier_block *nb, unsigned long event,
 
        if (event & SND_JACK_HEADPHONE)
                /* Disable speaker if headphone is plugged in */
-               snd_soc_dapm_disable_pin(dapm, "Ext Spk");
+               return snd_soc_dapm_disable_pin(dapm, "Ext Spk");
        else
-               snd_soc_dapm_enable_pin(dapm, "Ext Spk");
-
-       return 0;
+               return snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 }
 
 static struct notifier_block hp_jack_nb = {
@@ -481,11 +479,9 @@ static int mic_jack_event(struct notifier_block *nb, unsigned long event,
 
        if (event & SND_JACK_MICROPHONE)
                /* Disable dmic if microphone is plugged in */
-               snd_soc_dapm_disable_pin(dapm, "DMIC");
+               return snd_soc_dapm_disable_pin(dapm, "DMIC");
        else
-               snd_soc_dapm_enable_pin(dapm, "DMIC");
-
-       return 0;
+               return snd_soc_dapm_enable_pin(dapm, "DMIC");
 }
 
 static struct notifier_block mic_jack_nb = {
index d3b7104069411c7994c37d7ce3061fd118f0295f..98700e75b82a1fbce1877d884672f2bec5eb1331 100644 (file)
@@ -469,14 +469,14 @@ static int rockchip_i2s_tdm_set_fmt(struct snd_soc_dai *cpu_dai,
                txcr_val = I2S_TXCR_IBM_NORMAL;
                rxcr_val = I2S_RXCR_IBM_NORMAL;
                break;
-       case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */
-               txcr_val = I2S_TXCR_TFS_PCM;
-               rxcr_val = I2S_RXCR_TFS_PCM;
-               break;
-       case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */
+       case SND_SOC_DAIFMT_DSP_A: /* PCM delay 1 mode */
                txcr_val = I2S_TXCR_TFS_PCM | I2S_TXCR_PBM_MODE(1);
                rxcr_val = I2S_RXCR_TFS_PCM | I2S_RXCR_PBM_MODE(1);
                break;
+       case SND_SOC_DAIFMT_DSP_B: /* PCM no delay mode */
+               txcr_val = I2S_TXCR_TFS_PCM;
+               rxcr_val = I2S_RXCR_TFS_PCM;
+               break;
        default:
                ret = -EINVAL;
                goto err_pm_put;
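
For reference, the two formats being swapped differ only in where data starts relative to the frame sync: DSP_A delays the first data bit by one bit clock after the frame-sync edge, while DSP_B places data immediately on the edge with no delay. The original code had the labels and register values crossed, so the fix pairs DSP_A with the PBM_MODE(1) one-cycle delay and DSP_B with the plain TFS_PCM setting. A machine driver selects the mode through the usual DAI-format call; a hypothetical sketch:

    #include <sound/soc.h>

    /* request PCM mode A, i.e. one-BCLK data delay after frame sync */
    static int example_hw_params(struct snd_pcm_substream *substream,
                                 struct snd_pcm_hw_params *params)
    {
            struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);

            return snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0),
                                       SND_SOC_DAIFMT_DSP_A |
                                       SND_SOC_DAIFMT_NB_NF |
                                       SND_SOC_DAIFMT_CBS_CFS);
    }
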
index b53f216d4ecc32291eb413602466a2f89d52eb3b..172419392b33820efb515b1c9c75205b1ad7e87b 100644 (file)
@@ -84,6 +84,7 @@ if SND_SOC_SOF_PCI
 config SND_SOC_SOF_MERRIFIELD
        tristate "SOF support for Tangier/Merrifield"
        default SND_SOC_SOF_PCI
+       select SND_SOC_SOF_PCI_DEV
        select SND_SOC_SOF_INTEL_ATOM_HIFI_EP
        help
          This adds support for Sound Open Firmware for Intel(R) platforms
index cec6e91afea2403cc4fd56580c07a01291be3b26..6d699065e81a21bae12505f929131eb74cce7961 100644 (file)
@@ -669,9 +669,9 @@ static const struct snd_pcm_hardware snd_usb_hardware =
                                SNDRV_PCM_INFO_PAUSE,
        .channels_min =         1,
        .channels_max =         256,
-       .buffer_bytes_max =     1024 * 1024,
+       .buffer_bytes_max =     INT_MAX, /* limited by BUFFER_TIME later */
        .period_bytes_min =     64,
-       .period_bytes_max =     512 * 1024,
+       .period_bytes_max =     INT_MAX, /* limited by PERIOD_TIME later */
        .periods_min =          2,
        .periods_max =          1024,
 };
@@ -1064,6 +1064,18 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre
                        return err;
        }
 
+       /* set max period and buffer sizes for 1 and 2 seconds, respectively */
+       err = snd_pcm_hw_constraint_minmax(runtime,
+                                          SNDRV_PCM_HW_PARAM_PERIOD_TIME,
+                                          0, 1000000);
+       if (err < 0)
+               return err;
+       err = snd_pcm_hw_constraint_minmax(runtime,
+                                          SNDRV_PCM_HW_PARAM_BUFFER_TIME,
+                                          0, 2000000);
+       if (err < 0)
+               return err;
+
        /* additional hw constraints for implicit fb */
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
                                  hw_rule_format_implicit_fb, subs,
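
The switch from fixed byte caps to INT_MAX plus time-based constraints makes the real limits scale with the negotiated rate, channel count and sample width: the byte ceiling is simply rate x bytes-per-frame x seconds, capped at 1 s per period and 2 s per buffer. The old 512 KiB period cap was too small for high-rate multichannel interfaces while being irrelevant for ordinary stereo streams. A back-of-the-envelope check (standalone userspace arithmetic; the formats are illustrative only):

    #include <stdio.h>

    int main(void)
    {
            /* bytes in a 1 s period: rate * channels * bytes_per_sample */
            long stereo = 48000L  * 2 * 4;  /* S32_LE stereo:      384,000 */
            long multi  = 192000L * 8 * 4;  /* S32_LE 8-channel: 6,144,000 */

            printf("48k/2ch:  %ld bytes -> %s the old 512 KiB cap\n",
                   stereo, stereo <= 512 * 1024 ? "within" : "beyond");
            printf("192k/8ch: %ld bytes -> %s the old 512 KiB cap\n",
                   multi, multi <= 512 * 1024 ? "within" : "beyond");
            return 0;
    }
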
index 167834133b9bc8b81a8333f32938fceba6eb7810..b8359a0aa008a0a9f5dfcffb232cbe46ff2094d6 100644 (file)
@@ -8,7 +8,7 @@
  */
 
 /* handling of USB vendor/product ID pairs as 32-bit numbers */
-#define USB_ID(vendor, product) (((vendor) << 16) | (product))
+#define USB_ID(vendor, product) (((unsigned int)(vendor) << 16) | (product))
 #define USB_ID_VENDOR(id) ((id) >> 16)
 #define USB_ID_PRODUCT(id) ((u16)(id))
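
The cast in USB_ID() closes a signed-arithmetic hole: without it, a vendor ID of 0x8000 or above is promoted to int and shifted into the sign bit, which is undefined behaviour for signed left shifts, and the result can sign-extend when widened to a larger type. With the unsigned cast the full 32-bit combination is well defined. A standalone demonstration (hypothetical IDs):

    #include <stdio.h>

    #define USB_ID(vendor, product) (((unsigned int)(vendor) << 16) | (product))

    int main(void)
    {
            /* 0x8086 << 16 as a signed int would overflow into the sign
             * bit (undefined behaviour); the unsigned shift is well defined. */
            unsigned int id = USB_ID(0x8086, 0x1234);

            printf("id = 0x%08x\n", id);    /* prints id = 0x80861234 */
            return 0;
    }
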
 
index b00634663346b99a242dc55b7353415816724da6..0d828e35b40191b55d91fdc49aa5e48353466d39 100644 (file)
@@ -1652,7 +1652,7 @@ static void hdmi_lpe_audio_free(struct snd_card *card)
  * This function is called when the i915 driver creates the
  * hdmi-lpe-audio platform device.
  */
-static int hdmi_lpe_audio_probe(struct platform_device *pdev)
+static int __hdmi_lpe_audio_probe(struct platform_device *pdev)
 {
        struct snd_card *card;
        struct snd_intelhad_card *card_ctx;
@@ -1815,6 +1815,11 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
        return 0;
 }
 
+static int hdmi_lpe_audio_probe(struct platform_device *pdev)
+{
+       return snd_card_free_on_error(&pdev->dev, __hdmi_lpe_audio_probe(pdev));
+}
+
 static const struct dev_pm_ops hdmi_lpe_audio_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(hdmi_lpe_audio_suspend, hdmi_lpe_audio_resume)
 };
index 9afcc6467a095c350840eee1f4037f4cff2e0cda..e09d6908a21d36b1ee2feb65b477ff4859cce725 100644 (file)
@@ -75,6 +75,7 @@
 #define ARM_CPU_PART_CORTEX_A77                0xD0D
 #define ARM_CPU_PART_NEOVERSE_V1       0xD40
 #define ARM_CPU_PART_CORTEX_A78                0xD41
+#define ARM_CPU_PART_CORTEX_A78AE      0xD42
 #define ARM_CPU_PART_CORTEX_X1         0xD44
 #define ARM_CPU_PART_CORTEX_A510       0xD46
 #define ARM_CPU_PART_CORTEX_A710       0xD47
 #define MIDR_CORTEX_A77        MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
 #define MIDR_NEOVERSE_V1       MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
 #define MIDR_CORTEX_A78        MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
+#define MIDR_CORTEX_A78AE      MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
 #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
 #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
 #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
index 490d489d0ee84518bce07daec02c24b140ecc47c..c1b6ddc02d2ff96eac2248ad47f1e9191d3e85be 100644 (file)
@@ -419,6 +419,16 @@ struct kvm_arm_copy_mte_tags {
 #define KVM_PSCI_RET_INVAL             PSCI_RET_INVALID_PARAMS
 #define KVM_PSCI_RET_DENIED            PSCI_RET_DENIED
 
+/* arm64-specific kvm_run::system_event flags */
+/*
+ * Reset caused by a PSCI v1.1 SYSTEM_RESET2 call.
+ * Valid only when the system event has a type of KVM_SYSTEM_EVENT_RESET.
+ */
+#define KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2        (1ULL << 0)
+
+/* run->fail_entry.hardware_entry_failure_reason codes. */
+#define KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED    (1ULL << 0)
+
 #endif
 
 #endif /* __ARM_KVM_H__ */
index 3edf05e98e58625c30419d5ffd31793d54bd7812..73e643ae94b6f2206d8dac0fdd10477e7c1eb8df 100644 (file)
 #define X86_FEATURE_TSXLDTRK           (18*32+16) /* TSX Suspend Load Address Tracking */
 #define X86_FEATURE_PCONFIG            (18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_ARCH_LBR           (18*32+19) /* Intel ARCH LBR */
+#define X86_FEATURE_IBT                        (18*32+20) /* Indirect Branch Tracking */
 #define X86_FEATURE_AMX_BF16           (18*32+22) /* AMX bf16 Support */
 #define X86_FEATURE_AVX512_FP16                (18*32+23) /* AVX512 FP16 */
 #define X86_FEATURE_AMX_TILE           (18*32+24) /* AMX tile Support */
index 0e7f303542bf35d686bf2fbcb4cfcebaa18f8d27..ee15311b6be1d99e2bea11bd4c03a8a36fd8c706 100644 (file)
 #define TSX_CTRL_RTM_DISABLE           BIT(0)  /* Disable RTM feature */
 #define TSX_CTRL_CPUID_CLEAR           BIT(1)  /* Disable TSX enumeration */
 
-/* SRBDS support */
 #define MSR_IA32_MCU_OPT_CTRL          0x00000123
-#define RNGDS_MITG_DIS                 BIT(0)
+#define RNGDS_MITG_DIS                 BIT(0)  /* SRBDS support */
+#define RTM_ALLOW                      BIT(1)  /* TSX development mode */
 
 #define MSR_IA32_SYSENTER_CS           0x00000174
 #define MSR_IA32_SYSENTER_ESP          0x00000175
 #define RTIT_CTL_DISRETC               BIT(11)
 #define RTIT_CTL_PTW_EN                        BIT(12)
 #define RTIT_CTL_BRANCH_EN             BIT(13)
+#define RTIT_CTL_EVENT_EN              BIT(31)
+#define RTIT_CTL_NOTNT                 BIT_ULL(55)
 #define RTIT_CTL_MTC_RANGE_OFFSET      14
 #define RTIT_CTL_MTC_RANGE             (0x0full << RTIT_CTL_MTC_RANGE_OFFSET)
 #define RTIT_CTL_CYC_THRESH_OFFSET     19
 #define MSR_ATOM_CORE_TURBO_RATIOS     0x0000066c
 #define MSR_ATOM_CORE_TURBO_VIDS       0x0000066d
 
-
 #define MSR_CORE_PERF_LIMIT_REASONS    0x00000690
 #define MSR_GFX_PERF_LIMIT_REASONS     0x000006B0
 #define MSR_RING_PERF_LIMIT_REASONS    0x000006B1
 
+/* Control-flow Enforcement Technology MSRs */
+#define MSR_IA32_U_CET                 0x000006a0 /* user mode cet */
+#define MSR_IA32_S_CET                 0x000006a2 /* kernel mode cet */
+#define CET_SHSTK_EN                   BIT_ULL(0)
+#define CET_WRSS_EN                    BIT_ULL(1)
+#define CET_ENDBR_EN                   BIT_ULL(2)
+#define CET_LEG_IW_EN                  BIT_ULL(3)
+#define CET_NO_TRACK_EN                        BIT_ULL(4)
+#define CET_SUPPRESS_DISABLE           BIT_ULL(5)
+#define CET_RESERVED                   (BIT_ULL(6) | BIT_ULL(7) | BIT_ULL(8) | BIT_ULL(9))
+#define CET_SUPPRESS                   BIT_ULL(10)
+#define CET_WAIT_ENDBR                 BIT_ULL(11)
+
+#define MSR_IA32_PL0_SSP               0x000006a4 /* ring-0 shadow stack pointer */
+#define MSR_IA32_PL1_SSP               0x000006a5 /* ring-1 shadow stack pointer */
+#define MSR_IA32_PL2_SSP               0x000006a6 /* ring-2 shadow stack pointer */
+#define MSR_IA32_PL3_SSP               0x000006a7 /* ring-3 shadow stack pointer */
+#define MSR_IA32_INT_SSP_TAB           0x000006a8 /* exception shadow stack table */
+
 /* Hardware P state interface */
 #define MSR_PPERF                      0x0000064e
 #define MSR_PERF_LIMIT_REASONS         0x0000064f
index 9800f966fd5148aadde09636bb7f1bd279e72721..c6d2c77d02524a37d3e685f83d17e6e6bd4801a0 100644 (file)
@@ -74,7 +74,7 @@ CFLAGS += -O2
 CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wno-missing-field-initializers
 CFLAGS += $(filter-out -Wswitch-enum -Wnested-externs,$(EXTRA_WARNINGS))
 CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \
-       -I$(if $(OUTPUT),$(OUTPUT),.) \
+       -I$(or $(OUTPUT),.) \
        -I$(LIBBPF_INCLUDE) \
        -I$(srctree)/kernel/bpf/ \
        -I$(srctree)/tools/include \
@@ -180,7 +180,7 @@ endif
 
 $(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF_BOOTSTRAP)
        $(QUIET_CLANG)$(CLANG) \
-               -I$(if $(OUTPUT),$(OUTPUT),.) \
+               -I$(or $(OUTPUT),.) \
                -I$(srctree)/tools/include/uapi/ \
                -I$(LIBBPF_BOOTSTRAP_INCLUDE) \
                -g -O2 -Wall -target bpf -c $< -o $@
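
The -I$(or $(OUTPUT),.) form used here and in the tools/ Makefile hunks below relies on GNU Make's built-in $(or ...) function (available since Make 3.81), which expands to its first non-empty argument; it is an exact, shorter equivalent of $(if $(OUTPUT),$(OUTPUT),.), i.e. "use $(OUTPUT) when set, otherwise the current directory".
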
index 6f11e6fc9ffe33337564c4d0496a26ab6956640e..17cdf01e29a08e1363c2145d7a073969cc343400 100644 (file)
@@ -36,7 +36,7 @@ TMP_O := $(if $(OUTPUT),$(OUTPUT)feature/,./)
 
 clean:
        $(call QUIET_CLEAN, fixdep)
-       $(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
+       $(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
        $(Q)rm -f $(OUTPUT)fixdep
        $(call QUIET_CLEAN, feature-detect)
 ifneq ($(wildcard $(TMP_O)),)
index 1480910c792e2cb3fc6f63f858cf089e18222d70..de66e1cc073481c5f840ea2a6146ac0c4fe2f9cb 100644 (file)
@@ -217,9 +217,16 @@ strip-libs = $(filter-out -l%,$(1))
 PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
 PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
 PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
-PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
+PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null)
 FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
 
+ifeq ($(CC_NO_CLANG), 0)
+  PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS))
+  PERL_EMBED_CCOPTS := $(filter-out -flto=auto -ffat-lto-objects, $(PERL_EMBED_CCOPTS))
+  PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS))
+  FLAGS_PERL_EMBED += -Wno-compound-token-split-by-macro
+endif
+
 $(OUTPUT)test-libperl.bin:
        $(BUILD) $(FLAGS_PERL_EMBED)
 
index 5ebc195fd9c09cbbcc643cde1bb36328b62bf880..8843f0fa61199e94e65fa034627426db9d5ed139 100644 (file)
@@ -40,7 +40,7 @@ $(OUTPUT)counter_example: $(COUNTER_EXAMPLE)
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -rf $(OUTPUT)include/linux/counter.h
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index 4404340275579133008000eb4b3ddd1b75a8376c..d29c9c49e2512a3342cb59071f29460532f87745 100644 (file)
@@ -78,7 +78,7 @@ $(OUTPUT)gpio-watch: $(GPIO_WATCH_IN)
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -f $(OUTPUT)include/linux/gpio.h
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index b57143d9459c1176fd31389caedcb4907ff29087..fe770e679ae8fee34032099ad1cbaf8c1afbb130 100644 (file)
@@ -47,7 +47,7 @@ $(OUTPUT)hv_fcopy_daemon: $(HV_FCOPY_DAEMON_IN)
 
 clean:
        rm -f $(ALL_PROGRAMS)
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(sbindir); \
index 5d12ac4e7f8fe236e20e1e881572263f8792d9e2..fa720f062229532e6028ef851b40bdec1918523a 100644 (file)
@@ -58,7 +58,7 @@ $(OUTPUT)iio_generic_buffer: $(IIO_GENERIC_BUFFER_IN)
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -rf $(OUTPUT)include/linux/iio
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index 1567a3294c3dbf3ee38295bc915e612538b2519d..6c1aa92a92e4411946335cbb9724b75d8efa987a 100644 (file)
@@ -75,6 +75,8 @@
 #define MADV_POPULATE_READ     22      /* populate (prefault) page tables readable */
 #define MADV_POPULATE_WRITE    23      /* populate (prefault) page tables writable */
 
+#define MADV_DONTNEED_LOCKED   24      /* like DONTNEED, but drop locked pages too */
+
 /* compatibility flags */
 #define MAP_FILE       0
 
index 914ebd9290e5192d8feec6f5ff772a0d501b627f..05c3642aaece44290e424c535bab6d7626a648df 100644 (file)
@@ -1118,10 +1118,16 @@ struct drm_i915_gem_exec_object2 {
        /**
         * When the EXEC_OBJECT_PINNED flag is specified this is populated by
         * the user with the GTT offset at which this object will be pinned.
+        *
         * When the I915_EXEC_NO_RELOC flag is specified this must contain the
         * presumed_offset of the object.
+        *
         * During execbuffer2 the kernel populates it with the value of the
         * current GTT offset of the object, for future presumed_offset writes.
+        *
+        * See struct drm_i915_gem_create_ext for the rules when dealing with
+        * alignment restrictions with I915_MEMORY_CLASS_DEVICE, on devices with
+        * minimum page sizes, like DG2.
         */
        __u64 offset;
 
@@ -3144,11 +3150,40 @@ struct drm_i915_gem_create_ext {
         *
         * The (page-aligned) allocated size for the object will be returned.
         *
-        * Note that for some devices we have might have further minimum
-        * page-size restrictions(larger than 4K), like for device local-memory.
-        * However in general the final size here should always reflect any
-        * rounding up, if for example using the I915_GEM_CREATE_EXT_MEMORY_REGIONS
-        * extension to place the object in device local-memory.
+        *
+        * DG2 64K min page size implications:
+        *
+        * On discrete platforms, starting from DG2, we have to contend with GTT
+        * page size restrictions when dealing with I915_MEMORY_CLASS_DEVICE
+        * objects.  Specifically the hardware only supports 64K or larger GTT
+        * page sizes for such memory. The kernel will already ensure that all
+        * I915_MEMORY_CLASS_DEVICE memory is allocated using 64K or larger page
+        * sizes underneath.
+        *
+        * Note that the returned size here will always reflect any required
+        * rounding up done by the kernel, i.e. 4K will now become 64K on devices
+        * such as DG2.
+        *
+        * Special DG2 GTT address alignment requirement:
+        *
+        * The GTT alignment will also need to be at least 2M for such objects.
+        *
+        * Note that due to how the hardware implements 64K GTT page support, we
+        * have some further complications:
+        *
+        *   1) The entire PDE (which covers a 2MB virtual address range) must
+        *   contain only 64K PTEs, i.e. mixing 4K and 64K PTEs in the same
+        *   PDE is forbidden by the hardware.
+        *
+        *   2) We still need to support 4K PTEs for I915_MEMORY_CLASS_SYSTEM
+        *   objects.
+        *
+        * To keep things simple for userland, we mandate that any GTT mappings
+        * must be aligned to and rounded up to 2MB. The kernel will internally
+        * pad them out to the next 2MB boundary. As this only wastes virtual
+        * address space, spares userland from implementing any needlessly
+        * complicated PDE sharing scheme (coloring), and only affects DG2, it
+        * is deemed to be a good compromise.
         */
        __u64 size;
        /**
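
For userspace the upshot of the documentation above is mechanical: on DG2-class hardware, sizes and GTT addresses of I915_MEMORY_CLASS_DEVICE objects must be quantized to 2 MiB. A hedged sketch of the rounding helper an allocator might use (plain C, not tied to any real userspace driver):

    #include <stdint.h>

    #define SZ_2M   (2u * 1024 * 1024)

    /* round a GTT offset or size up to the 2 MiB boundary DG2 requires */
    static inline uint64_t dg2_gtt_align(uint64_t v)
    {
            return (v + SZ_2M - 1) & ~(uint64_t)(SZ_2M - 1);
    }
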
index bbc6b7c2dc1b3a54957ff53cedd4c23453dc64eb..91a6fe4e02c08c4b6ac6f1fb91f8f9e3fce85c0f 100644 (file)
@@ -562,9 +562,12 @@ struct kvm_s390_mem_op {
        __u32 op;               /* type of operation */
        __u64 buf;              /* buffer in userspace */
        union {
-               __u8 ar;        /* the access register number */
+               struct {
+                       __u8 ar;        /* the access register number */
+                       __u8 key;       /* access key, ignored if flag unset */
+               };
                __u32 sida_offset; /* offset into the sida */
-               __u8 reserved[32]; /* should be set to 0 */
+               __u8 reserved[32]; /* ignored */
        };
 };
 /* types for kvm_s390_mem_op->op */
@@ -572,9 +575,12 @@ struct kvm_s390_mem_op {
 #define KVM_S390_MEMOP_LOGICAL_WRITE   1
 #define KVM_S390_MEMOP_SIDA_READ       2
 #define KVM_S390_MEMOP_SIDA_WRITE      3
+#define KVM_S390_MEMOP_ABSOLUTE_READ   4
+#define KVM_S390_MEMOP_ABSOLUTE_WRITE  5
 /* flags for kvm_s390_mem_op->flags */
 #define KVM_S390_MEMOP_F_CHECK_ONLY            (1ULL << 0)
 #define KVM_S390_MEMOP_F_INJECT_EXCEPTION      (1ULL << 1)
+#define KVM_S390_MEMOP_F_SKEY_PROTECTION       (1ULL << 2)
 
 /* for KVM_INTERRUPT */
 struct kvm_interrupt {
@@ -1137,6 +1143,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_PPC_AIL_MODE_3 210
 #define KVM_CAP_S390_MEM_OP_EXTENSION 211
 #define KVM_CAP_PMU_CAPABILITY 212
+#define KVM_CAP_DISABLE_QUIRKS2 213
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index c998860d7bbc4351c37c702ea69ea88a814b19cf..5d99e7c242a25e11dc579e059564aec4989b3387 100644 (file)
 /* Get the valid iova range */
 #define VHOST_VDPA_GET_IOVA_RANGE      _IOR(VHOST_VIRTIO, 0x78, \
                                             struct vhost_vdpa_iova_range)
+
+/* Get the config size */
+#define VHOST_VDPA_GET_CONFIG_SIZE     _IOR(VHOST_VIRTIO, 0x79, __u32)
+
+/* Get the count of all virtqueues */
+#define VHOST_VDPA_GET_VQS_COUNT       _IOR(VHOST_VIRTIO, 0x80, __u32)
+
 #endif
index a13e9c7f1fc5007d130807d75aa0a0a0120ddb0f..e21e1b40b52575dee2c1222d165df590a406a281 100644 (file)
@@ -60,7 +60,7 @@ $(LIBFILE): $(API_IN)
 
 clean:
        $(call QUIET_CLEAN, libapi) $(RM) $(LIBFILE); \
-       find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
+       find $(or $(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
 
 FORCE:
 
index b8b37fe7600697149caa3aef95d85178294ca91b..064c89e315609f5f20b6ac144a6eb09c8dada5c5 100644 (file)
@@ -60,7 +60,7 @@ ifndef VERBOSE
   VERBOSE = 0
 endif
 
-INCLUDES = -I$(if $(OUTPUT),$(OUTPUT),.)                               \
+INCLUDES = -I$(or $(OUTPUT),.) \
           -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi
 
 export prefix libdir src obj
index 08fe6e3c4089fec34f9262ac1041add9be699262..21df023a2103ead8de7f245392c4188eae9a0c7f 100644 (file)
@@ -153,7 +153,7 @@ $(TESTS_STATIC): $(TESTS_IN) $(LIBPERF_A) $(LIBAPI)
        $(QUIET_LINK)$(CC) -o $@ $^
 
 $(TESTS_SHARED): $(TESTS_IN) $(LIBAPI)
-       $(QUIET_LINK)$(CC) -o $@ -L$(if $(OUTPUT),$(OUTPUT),.) $^ -lperf
+       $(QUIET_LINK)$(CC) -o $@ -L$(or $(OUTPUT),.) $^ -lperf
 
 make-tests: libs $(TESTS_SHARED) $(TESTS_STATIC)
 
index ee66760f1e63c70854a0a3a8033624a19b3d0825..384d5e076ee436ac2905c6572dbbdd0164bc2eb8 100644 (file)
@@ -319,6 +319,26 @@ struct perf_cpu perf_cpu_map__max(struct perf_cpu_map *map)
        return map->nr > 0 ? map->map[map->nr - 1] : result;
 }
 
+/** Is 'b' a subset of 'a'? */
+bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b)
+{
+       if (a == b || !b)
+               return true;
+       if (!a || b->nr > a->nr)
+               return false;
+
+       for (int i = 0, j = 0; i < a->nr; i++) {
+               if (a->map[i].cpu > b->map[j].cpu)
+                       return false;
+               if (a->map[i].cpu == b->map[j].cpu) {
+                       j++;
+                       if (j == b->nr)
+                               return true;
+               }
+       }
+       return false;
+}
+
 /*
  * Merge two cpumaps
  *
@@ -335,17 +355,12 @@ struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
        int i, j, k;
        struct perf_cpu_map *merged;
 
-       if (!orig && !other)
-               return NULL;
-       if (!orig) {
-               perf_cpu_map__get(other);
-               return other;
-       }
-       if (!other)
-               return orig;
-       if (orig->nr == other->nr &&
-           !memcmp(orig->map, other->map, orig->nr * sizeof(struct perf_cpu)))
+       if (perf_cpu_map__is_subset(orig, other))
                return orig;
+       if (perf_cpu_map__is_subset(other, orig)) {
+               perf_cpu_map__put(orig);
+               return perf_cpu_map__get(other);
+       }
 
        tmp_len = orig->nr + other->nr;
        tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
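
perf_cpu_map__is_subset() leans on the invariant that cpu maps are kept sorted: a single forward scan walks 'a', advancing 'j' only on an exact match, so 'b' is a subset precisely when all of its entries are consumed in order, in O(a->nr) time. Worked example: a = {0,1,2,3}, b = {1,3} succeeds (j reaches b->nr at cpu 3), whereas b = {1,5} fails because 'a' is exhausted first. The rewritten merge above then becomes cheap in the common cases: when either map already contains the other, the existing map is reused with reference counts adjusted, and no new map is allocated.
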
index 9a770bfdc804293f02257d8c8a532939b8a4874a..a09315538a303b788dd3d7107717d6aa3ffc2237 100644 (file)
@@ -41,10 +41,10 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
         */
        if (!evsel->own_cpus || evlist->has_user_cpus) {
                perf_cpu_map__put(evsel->cpus);
-               evsel->cpus = perf_cpu_map__get(evlist->cpus);
-       } else if (!evsel->system_wide && perf_cpu_map__empty(evlist->cpus)) {
+               evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
+       } else if (!evsel->system_wide && perf_cpu_map__empty(evlist->user_requested_cpus)) {
                perf_cpu_map__put(evsel->cpus);
-               evsel->cpus = perf_cpu_map__get(evlist->cpus);
+               evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
        } else if (evsel->cpus != evsel->own_cpus) {
                perf_cpu_map__put(evsel->cpus);
                evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
@@ -123,10 +123,10 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
 
 void perf_evlist__exit(struct perf_evlist *evlist)
 {
-       perf_cpu_map__put(evlist->cpus);
+       perf_cpu_map__put(evlist->user_requested_cpus);
        perf_cpu_map__put(evlist->all_cpus);
        perf_thread_map__put(evlist->threads);
-       evlist->cpus = NULL;
+       evlist->user_requested_cpus = NULL;
        evlist->all_cpus = NULL;
        evlist->threads = NULL;
        fdarray__exit(&evlist->pollfd);
@@ -155,9 +155,9 @@ void perf_evlist__set_maps(struct perf_evlist *evlist,
         * original reference count of 1.  If that is not the case it is up to
         * the caller to increase the reference count.
         */
-       if (cpus != evlist->cpus) {
-               perf_cpu_map__put(evlist->cpus);
-               evlist->cpus = perf_cpu_map__get(cpus);
+       if (cpus != evlist->user_requested_cpus) {
+               perf_cpu_map__put(evlist->user_requested_cpus);
+               evlist->user_requested_cpus = perf_cpu_map__get(cpus);
        }
 
        if (threads != evlist->threads) {
@@ -294,7 +294,7 @@ add:
 
 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 {
-       int nr_cpus = perf_cpu_map__nr(evlist->cpus);
+       int nr_cpus = perf_cpu_map__nr(evlist->user_requested_cpus);
        int nr_threads = perf_thread_map__nr(evlist->threads);
        int nfds = 0;
        struct perf_evsel *evsel;
@@ -426,7 +426,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
               int idx, struct perf_mmap_param *mp, int cpu_idx,
               int thread, int *_output, int *_output_overwrite)
 {
-       struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
+       struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->user_requested_cpus, cpu_idx);
        struct perf_evsel *evsel;
        int revent;
 
@@ -536,7 +536,7 @@ mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
             struct perf_mmap_param *mp)
 {
        int nr_threads = perf_thread_map__nr(evlist->threads);
-       int nr_cpus    = perf_cpu_map__nr(evlist->cpus);
+       int nr_cpus    = perf_cpu_map__nr(evlist->user_requested_cpus);
        int cpu, thread;
 
        for (cpu = 0; cpu < nr_cpus; cpu++) {
@@ -564,8 +564,8 @@ static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
 {
        int nr_mmaps;
 
-       nr_mmaps = perf_cpu_map__nr(evlist->cpus);
-       if (perf_cpu_map__empty(evlist->cpus))
+       nr_mmaps = perf_cpu_map__nr(evlist->user_requested_cpus);
+       if (perf_cpu_map__empty(evlist->user_requested_cpus))
                nr_mmaps = perf_thread_map__nr(evlist->threads);
 
        return nr_mmaps;
@@ -576,8 +576,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
                          struct perf_mmap_param *mp)
 {
        struct perf_evsel *evsel;
-       const struct perf_cpu_map *cpus = evlist->cpus;
-       const struct perf_thread_map *threads = evlist->threads;
+       const struct perf_cpu_map *cpus = evlist->user_requested_cpus;
 
        if (!ops || !ops->get || !ops->mmap)
                return -EINVAL;
@@ -589,7 +588,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
        perf_evlist__for_each_entry(evlist, evsel) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
-                   perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
+                   perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
                        return -ENOMEM;
        }
 
index 1973a18c096b079e8685288344b54f99a554000a..35dd29642296e6602a9702257cf05c55d4093c82 100644 (file)
@@ -25,5 +25,6 @@ struct perf_cpu_map {
 #endif
 
 int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu);
+bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b);
 
 #endif /* __LIBPERF_INTERNAL_CPUMAP_H */
index 4cefade540bdf64aeee4143bb093a67dbf49dc52..e3e64f37db7ba4319c7a8f8c6337b449dc0168fa 100644 (file)
@@ -19,7 +19,12 @@ struct perf_evlist {
        int                      nr_entries;
        int                      nr_groups;
        bool                     has_user_cpus;
-       struct perf_cpu_map     *cpus;
+       /**
+        * The cpus passed from the command line or all online CPUs by
+        * default.
+        */
+       struct perf_cpu_map     *user_requested_cpus;
+       /** The union of all evsel cpu maps. */
        struct perf_cpu_map     *all_cpus;
        struct perf_thread_map  *threads;
        int                      nr_mmaps;
index 1c777a72bb398346a24afa192a3d5d30f0d40fa7..8f1a09cdfd17e5a5b97e0ac84c1bfc737ac672fb 100644 (file)
@@ -63,7 +63,7 @@ $(LIBFILE): $(SUBCMD_IN)
 
 clean:
        $(call QUIET_CLEAN, libsubcmd) $(RM) $(LIBFILE); \
-       find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
+       find $(or $(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
 
 FORCE:
 
index 92ce4fce7bc7301ca603e088478c9b6d65c1e42f..0dbd397f319d1a0793f08dfa3bd6e29b44fdd682 100644 (file)
@@ -13,7 +13,7 @@ srctree := $(patsubst %/,%,$(dir $(srctree)))
 endif
 
 SUBCMD_SRCDIR          = $(srctree)/tools/lib/subcmd/
-LIBSUBCMD_OUTPUT       = $(if $(OUTPUT),$(OUTPUT),$(CURDIR)/)
+LIBSUBCMD_OUTPUT       = $(or $(OUTPUT),$(CURDIR)/)
 LIBSUBCMD              = $(LIBSUBCMD_OUTPUT)libsubcmd.a
 
 OBJTOOL    := $(OUTPUT)objtool
index 6de5085e3e5a948454d1b962c03137450dce5a6f..bd0c2c828940a083e84ee93ae2c214b790c39fac 100644 (file)
@@ -1155,6 +1155,17 @@ static void annotate_call_site(struct objtool_file *file,
                                       : arch_nop_insn(insn->len));
 
                insn->type = sibling ? INSN_RETURN : INSN_NOP;
+
+               if (sibling) {
+                       /*
+                        * We've replaced the tail-call JMP insn with two new
+                        * insns: RET; INT3, except we only have a single struct
+                        * insn here. Mark it retpoline_safe to avoid the SLS
+                        * warning, instead of adding another insn.
+                        */
+                       insn->retpoline_safe = true;
+               }
+
                return;
        }
 
@@ -1239,11 +1250,20 @@ static bool same_function(struct instruction *insn1, struct instruction *insn2)
        return insn1->func->pfunc == insn2->func->pfunc;
 }
 
-static bool is_first_func_insn(struct instruction *insn)
+static bool is_first_func_insn(struct objtool_file *file, struct instruction *insn)
 {
-       return insn->offset == insn->func->offset ||
-              (insn->type == INSN_ENDBR &&
-               insn->offset == insn->func->offset + insn->len);
+       if (insn->offset == insn->func->offset)
+               return true;
+
+       if (ibt) {
+               struct instruction *prev = prev_insn_same_sym(file, insn);
+
+               if (prev && prev->type == INSN_ENDBR &&
+                   insn->offset == insn->func->offset + prev->len)
+                       return true;
+       }
+
+       return false;
 }
 
 /*
@@ -1327,7 +1347,7 @@ static int add_jump_destinations(struct objtool_file *file)
                                insn->jump_dest->func->pfunc = insn->func;
 
                        } else if (!same_function(insn, insn->jump_dest) &&
-                                  is_first_func_insn(insn->jump_dest)) {
+                                  is_first_func_insn(file, insn->jump_dest)) {
                                /* internal sibling call (without reloc) */
                                add_call_dest(file, insn, insn->jump_dest->func, true);
                        }
index 4b95a517635519bf57a127e6dca2b6496bedc716..57744778b518aa3fbcd2ff7e2e87a2439a14723e 100644 (file)
@@ -42,7 +42,7 @@ $(OUTPUT)pcitest: $(PCITEST_IN)
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -rf $(OUTPUT)include/
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index 9c330cdfa973abac7fb25f23f48cb1eeffee70b2..71ebdf8125de31b7c0c0be39c4d3762fa6722ff0 100644 (file)
@@ -83,7 +83,7 @@ linkperf:perf-buildid-list[1], linkperf:perf-c2c[1],
 linkperf:perf-config[1], linkperf:perf-data[1], linkperf:perf-diff[1],
 linkperf:perf-evlist[1], linkperf:perf-ftrace[1],
 linkperf:perf-help[1], linkperf:perf-inject[1],
-linkperf:perf-intel-pt[1], linkperf:perf-kallsyms[1],
+linkperf:perf-intel-pt[1], linkperf:perf-iostat[1], linkperf:perf-kallsyms[1],
 linkperf:perf-kmem[1], linkperf:perf-kvm[1], linkperf:perf-lock[1],
 linkperf:perf-mem[1], linkperf:perf-probe[1], linkperf:perf-sched[1],
 linkperf:perf-script[1], linkperf:perf-test[1],
index 96ad944ca6a885cdb1a6c2dd1cd63d33026c2c8b..f3bf9297bcc03c5e5075a0233d8ca49285101c82 100644 (file)
@@ -272,6 +272,9 @@ ifdef PYTHON_CONFIG
   PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
   PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null)
   FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
+  ifeq ($(CC_NO_CLANG), 0)
+    PYTHON_EMBED_CCOPTS := $(filter-out -ffat-lto-objects, $(PYTHON_EMBED_CCOPTS))
+  endif
 endif
 
 FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
@@ -790,6 +793,9 @@ else
     LDFLAGS += $(PERL_EMBED_LDFLAGS)
     EXTLIBS += $(PERL_EMBED_LIBADD)
     CFLAGS += -DHAVE_LIBPERL_SUPPORT
+    ifeq ($(CC_NO_CLANG), 0)
+      CFLAGS += -Wno-compound-token-split-by-macro
+    endif
     $(call detected,CONFIG_LIBPERL)
   endif
 endif
index 9c935f86d172e34b9f0e82ef0f1665b78c7c0d35..69473a836bae8b0b52a036dbfcf664895e6cb142 100644 (file)
@@ -691,9 +691,8 @@ $(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt)
 $(SCRIPTS) : % : %.sh
        $(QUIET_GEN)$(INSTALL) '$@.sh' '$(OUTPUT)$@'
 
-$(OUTPUT)PERF-VERSION-FILE: ../../.git/HEAD ../../.git/ORIG_HEAD
+$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
        $(Q)$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
-       $(Q)touch $(OUTPUT)PERF-VERSION-FILE
 
 # These can record PERF_VERSION
 perf.spec $(SCRIPTS) \
@@ -724,7 +723,7 @@ endif
 # get relative building directory (to $(OUTPUT))
 # and '.' if it's $(OUTPUT) itself
 __build-dir = $(subst $(OUTPUT),,$(dir $@))
-build-dir   = $(if $(__build-dir),$(__build-dir),.)
+build-dir   = $(or $(__build-dir),.)
 
 prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioctl_array) \
        $(fadvise_advice_array) \
@@ -1090,7 +1089,7 @@ bpf-skel-clean:
 
 clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBPERF)-clean fixdep-clean python-clean bpf-skel-clean
        $(call QUIET_CLEAN, core-objs)  $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(OUTPUT)perf-iostat $(LANG_BINDINGS)
-       $(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
+       $(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
        $(Q)$(RM) $(OUTPUT).config-detected
        $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)pmu-events/jevents $(OUTPUT)$(LIBJVMTI).so
        $(call QUIET_CLEAN, core-gen)   $(RM)  *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \
@@ -1139,21 +1138,12 @@ else
        @echo "FEATURE-DUMP file available in $(OUTPUT)FEATURE-DUMP"
 endif
 
-#
-# Trick: if ../../.git does not exist - we are building out of tree for example,
-# then force version regeneration:
-#
-ifeq ($(wildcard ../../.git/HEAD),)
-    GIT-HEAD-PHONY = ../../.git/HEAD ../../.git/ORIG_HEAD
-else
-    GIT-HEAD-PHONY =
-endif
 
 FORCE:
 
 .PHONY: all install clean config-clean strip install-gtk
 .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
-.PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope FORCE prepare
+.PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope FORCE prepare
 .PHONY: libtraceevent_plugins archheaders
 
 endif # force_fixdep
index cbc55524595955af8ab72e10164c50aad568683c..11c71aa219f7ccb118760e03156bec0a22ab16d2 100644 (file)
@@ -199,7 +199,7 @@ static int cs_etm_set_option(struct auxtrace_record *itr,
                             struct evsel *evsel, u32 option)
 {
        int i, err = -EINVAL;
-       struct perf_cpu_map *event_cpus = evsel->evlist->core.cpus;
+       struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus;
        struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
 
        /* Set option of each CPU we have */
@@ -299,7 +299,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
                                container_of(itr, struct cs_etm_recording, itr);
        struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
        struct evsel *evsel, *cs_etm_evsel = NULL;
-       struct perf_cpu_map *cpus = evlist->core.cpus;
+       struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
        bool privileged = perf_event_paranoid_check(-1);
        int err = 0;
 
@@ -522,7 +522,7 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
 {
        int i;
        int etmv3 = 0, etmv4 = 0, ete = 0;
-       struct perf_cpu_map *event_cpus = evlist->core.cpus;
+       struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
        struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
 
        /* cpu map is not empty, we have specific CPUs to work with */
@@ -713,7 +713,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
        u32 offset;
        u64 nr_cpu, type;
        struct perf_cpu_map *cpu_map;
-       struct perf_cpu_map *event_cpus = session->evlist->core.cpus;
+       struct perf_cpu_map *event_cpus = session->evlist->core.user_requested_cpus;
        struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
        struct cs_etm_recording *ptr =
                        container_of(itr, struct cs_etm_recording, itr);
index 5860bbaea95a95109e9a14b13f5322a4482b0d82..af4d63af8072affd7b828ccf6f649c8063bf5172 100644 (file)
@@ -144,7 +144,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
                        container_of(itr, struct arm_spe_recording, itr);
        struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
        struct evsel *evsel, *arm_spe_evsel = NULL;
-       struct perf_cpu_map *cpus = evlist->core.cpus;
+       struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
        bool privileged = perf_event_paranoid_check(-1);
        struct evsel *tracking_evsel;
        int err;
@@ -239,6 +239,12 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
                arm_spe_set_timestamp(itr, arm_spe_evsel);
        }
 
+       /*
+        * Set this only so that perf report knows that SPE generates memory info. It has no effect
+        * on the opening of the event or the SPE data produced.
+        */
+       evsel__set_sample_bit(arm_spe_evsel, DATA_SRC);
+
        /* Add dummy event to keep tracking */
        err = parse_events(evlist, "dummy:u", NULL);
        if (err)
index 4a76d49d25d606055648e9ed70a59d5239e70f1c..d68a0f48e41eaa6109e376853b1648473c91b96d 100644 (file)
@@ -110,7 +110,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
                        container_of(itr, struct intel_bts_recording, itr);
        struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
        struct evsel *evsel, *intel_bts_evsel = NULL;
-       const struct perf_cpu_map *cpus = evlist->core.cpus;
+       const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
        bool privileged = perf_event_paranoid_check(-1);
 
        if (opts->auxtrace_sample_mode) {
index 8c31578d6f4abd042f4107168fa3b959e0e5b12f..38ec2666ec12d2c5c20375bcc1e4a953ae61667f 100644 (file)
@@ -382,7 +382,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
                        ui__warning("Intel Processor Trace: TSC not available\n");
        }
 
-       per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.cpus);
+       per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.user_requested_cpus);
 
        auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
        auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
@@ -632,7 +632,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
        struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
        bool have_timing_info, need_immediate = false;
        struct evsel *evsel, *intel_pt_evsel = NULL;
-       const struct perf_cpu_map *cpus = evlist->core.cpus;
+       const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
        bool privileged = perf_event_paranoid_check(-1);
        u64 tsc_bit;
        int err;
index 134612bde0cb3c0c9f706e58dd24b5ce64ab6b74..4256dc5d6236d4aeef59b02cc2cbcfa3dec4c26b 100644 (file)
@@ -222,13 +222,20 @@ static void init_fdmaps(struct worker *w, int pct)
 static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
 {
        pthread_attr_t thread_attr, *attrp = NULL;
-       cpu_set_t cpuset;
+       cpu_set_t *cpuset;
        unsigned int i, j;
        int ret = 0;
+       int nrcpus;
+       size_t size;
 
        if (!noaffinity)
                pthread_attr_init(&thread_attr);
 
+       nrcpus = perf_cpu_map__nr(cpu);
+       cpuset = CPU_ALLOC(nrcpus);
+       BUG_ON(!cpuset);
+       size = CPU_ALLOC_SIZE(nrcpus);
+
        for (i = 0; i < nthreads; i++) {
                struct worker *w = &worker[i];
 
@@ -252,22 +259,28 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
                        init_fdmaps(w, 50);
 
                if (!noaffinity) {
-                       CPU_ZERO(&cpuset);
-                       CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+                       CPU_ZERO_S(size, cpuset);
+                       CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
+                                       size, cpuset);
 
-                       ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
-                       if (ret)
+                       ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+                       if (ret) {
+                               CPU_FREE(cpuset);
                                err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+                       }
 
                        attrp = &thread_attr;
                }
 
                ret = pthread_create(&w->thread, attrp, workerfn,
                                     (void *)(struct worker *) w);
-               if (ret)
+               if (ret) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_create");
+               }
        }
 
+       CPU_FREE(cpuset);
        if (!noaffinity)
                pthread_attr_destroy(&thread_attr);
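
The recurring cpu_set_t conversion in these benchmarks fixes affinity handling on large machines: a fixed cpu_set_t only covers CPU numbers below CPU_SETSIZE (1024 with glibc), so systems with more CPUs need the dynamically sized API, i.e. CPU_ALLOC() for the set, CPU_ALLOC_SIZE() for the byte size that the _S macros and pthread_attr_setaffinity_np() expect, and CPU_FREE() on every exit path. A self-contained sketch of the idiom (the CPU count and pinned CPU are made up):

    #define _GNU_SOURCE
    #include <err.h>
    #include <pthread.h>
    #include <sched.h>
    #include <stdlib.h>

    int main(void)
    {
            int nrcpus = 4096;              /* hypothetical, > CPU_SETSIZE */
            cpu_set_t *set = CPU_ALLOC(nrcpus);
            size_t size = CPU_ALLOC_SIZE(nrcpus);
            pthread_attr_t attr;

            if (!set)
                    err(EXIT_FAILURE, "CPU_ALLOC");

            pthread_attr_init(&attr);
            CPU_ZERO_S(size, set);
            CPU_SET_S(3, size, set);        /* pin threads to CPU 3 */

            if (pthread_attr_setaffinity_np(&attr, size, set)) {
                    CPU_FREE(set);
                    err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
            }

            pthread_attr_destroy(&attr);
            CPU_FREE(set);
            return 0;
    }
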
 
index 37de970c97437ccb983c2154a250d3891c867546..2728b0140853fd6a3e89b90317e0944b0140df40 100644 (file)
@@ -291,9 +291,11 @@ static void print_summary(void)
 static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
 {
        pthread_attr_t thread_attr, *attrp = NULL;
-       cpu_set_t cpuset;
+       cpu_set_t *cpuset;
        unsigned int i, j;
        int ret = 0, events = EPOLLIN;
+       int nrcpus;
+       size_t size;
 
        if (oneshot)
                events |= EPOLLONESHOT;
@@ -306,6 +308,11 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
        if (!noaffinity)
                pthread_attr_init(&thread_attr);
 
+       nrcpus = perf_cpu_map__nr(cpu);
+       cpuset = CPU_ALLOC(nrcpus);
+       BUG_ON(!cpuset);
+       size = CPU_ALLOC_SIZE(nrcpus);
+
        for (i = 0; i < nthreads; i++) {
                struct worker *w = &worker[i];
 
@@ -341,22 +348,28 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
                }
 
                if (!noaffinity) {
-                       CPU_ZERO(&cpuset);
-                       CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+                       CPU_ZERO_S(size, cpuset);
+                       CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
+                                       size, cpuset);
 
-                       ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
-                       if (ret)
+                       ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+                       if (ret) {
+                               CPU_FREE(cpuset);
                                err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+                       }
 
                        attrp = &thread_attr;
                }
 
                ret = pthread_create(&w->thread, attrp, workerfn,
                                     (void *)(struct worker *) w);
-               if (ret)
+               if (ret) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_create");
+               }
        }
 
+       CPU_FREE(cpuset);
        if (!noaffinity)
                pthread_attr_destroy(&thread_attr);
 
index de56601f69ee6c32bb2542dfda9d626080c97ee2..5a27691469edb4bf28d16336317e91a95030b023 100644 (file)
@@ -151,7 +151,7 @@ static int bench_evlist_open_close__run(char *evstr)
 
        init_stats(&time_stats);
 
-       printf("  Number of cpus:\t%d\n", perf_cpu_map__nr(evlist->core.cpus));
+       printf("  Number of cpus:\t%d\n", perf_cpu_map__nr(evlist->core.user_requested_cpus));
        printf("  Number of threads:\t%d\n", evlist->core.threads->nr);
        printf("  Number of events:\t%d (%d fds)\n",
                evlist->core.nr_entries, evlist__count_evsel_fds(evlist));
index dbcecec4eedacec29b9f2c88654c7a0d8395fd51..f05db4cf983d6e0c8e32ab7920a6a0865002c870 100644 (file)
@@ -122,12 +122,14 @@ static void print_summary(void)
 int bench_futex_hash(int argc, const char **argv)
 {
        int ret = 0;
-       cpu_set_t cpuset;
+       cpu_set_t *cpuset;
        struct sigaction act;
        unsigned int i;
        pthread_attr_t thread_attr;
        struct worker *worker = NULL;
        struct perf_cpu_map *cpu;
+       int nrcpus;
+       size_t size;
 
        argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
        if (argc) {
@@ -170,25 +172,35 @@ int bench_futex_hash(int argc, const char **argv)
        threads_starting = params.nthreads;
        pthread_attr_init(&thread_attr);
        gettimeofday(&bench__start, NULL);
+
+       nrcpus = perf_cpu_map__nr(cpu);
+       cpuset = CPU_ALLOC(nrcpus);
+       BUG_ON(!cpuset);
+       size = CPU_ALLOC_SIZE(nrcpus);
+
        for (i = 0; i < params.nthreads; i++) {
                worker[i].tid = i;
                worker[i].futex = calloc(params.nfutexes, sizeof(*worker[i].futex));
                if (!worker[i].futex)
                        goto errmem;
 
-               CPU_ZERO(&cpuset);
-               CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+               CPU_ZERO_S(size, cpuset);
 
-               ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
-               if (ret)
+               CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
+               ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+               if (ret) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
-
+               }
                ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,
                                     (void *)(struct worker *) &worker[i]);
-               if (ret)
+               if (ret) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_create");
+               }
 
        }
+       CPU_FREE(cpuset);
        pthread_attr_destroy(&thread_attr);
 
        pthread_mutex_lock(&thread_lock);
index 6fc9a3d55c1f768da88b33f2a88e23f4bfab84d9..0abb3f7ee24f78ece65b33c69e2f95461f250d35 100644 (file)
@@ -120,11 +120,17 @@ static void *workerfn(void *arg)
 static void create_threads(struct worker *w, pthread_attr_t thread_attr,
                           struct perf_cpu_map *cpu)
 {
-       cpu_set_t cpuset;
+       cpu_set_t *cpuset;
        unsigned int i;
+       int nrcpus = perf_cpu_map__nr(cpu);
+       size_t size;
 
        threads_starting = params.nthreads;
 
+       cpuset = CPU_ALLOC(nrcpus);
+       BUG_ON(!cpuset);
+       size = CPU_ALLOC_SIZE(nrcpus);
+
        for (i = 0; i < params.nthreads; i++) {
                worker[i].tid = i;
 
@@ -135,15 +141,20 @@ static void create_threads(struct worker *w, pthread_attr_t thread_attr,
                } else
                        worker[i].futex = &global_futex;
 
-               CPU_ZERO(&cpuset);
-               CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+               CPU_ZERO_S(size, cpuset);
+               CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
 
-               if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+               if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+               }
 
-               if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i]))
+               if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i])) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_create");
+               }
        }
+       CPU_FREE(cpuset);
 }
 
 int bench_futex_lock_pi(int argc, const char **argv)
index 2f59d5d1c50968cf906de919e303be1c472aa370..b6faabfafb8eed33d6046c4d74c7c842f23aca17 100644 (file)
@@ -123,22 +123,33 @@ static void *workerfn(void *arg __maybe_unused)
 static void block_threads(pthread_t *w,
                          pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
 {
-       cpu_set_t cpuset;
+       cpu_set_t *cpuset;
        unsigned int i;
+       int nrcpus = perf_cpu_map__nr(cpu);
+       size_t size;
 
        threads_starting = params.nthreads;
 
+       cpuset = CPU_ALLOC(nrcpus);
+       BUG_ON(!cpuset);
+       size = CPU_ALLOC_SIZE(nrcpus);
+
        /* create and block all threads */
        for (i = 0; i < params.nthreads; i++) {
-               CPU_ZERO(&cpuset);
-               CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+               CPU_ZERO_S(size, cpuset);
+               CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
 
-               if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+               if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+               }
 
-               if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
+               if (pthread_create(&w[i], &thread_attr, workerfn, NULL)) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_create");
+               }
        }
+       CPU_FREE(cpuset);
 }
 
 static void toggle_done(int sig __maybe_unused,
index 861deb934745d279238b2a91c646bbb315274be8..e47f46a3a47e934db6aa875cfb16e6c1482241bf 100644 (file)
@@ -144,22 +144,33 @@ static void *blocked_workerfn(void *arg __maybe_unused)
 static void block_threads(pthread_t *w, pthread_attr_t thread_attr,
                          struct perf_cpu_map *cpu)
 {
-       cpu_set_t cpuset;
+       cpu_set_t *cpuset;
        unsigned int i;
+       int nrcpus = perf_cpu_map__nr(cpu);
+       size_t size;
 
        threads_starting = params.nthreads;
 
+       cpuset = CPU_ALLOC(nrcpus);
+       BUG_ON(!cpuset);
+       size = CPU_ALLOC_SIZE(nrcpus);
+
        /* create and block all threads */
        for (i = 0; i < params.nthreads; i++) {
-               CPU_ZERO(&cpuset);
-               CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+               CPU_ZERO_S(size, cpuset);
+               CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
 
-               if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+               if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+               }
 
-               if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL))
+               if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL)) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_create");
+               }
        }
+       CPU_FREE(cpuset);
 }
 
 static void print_run(struct thread_data *waking_worker, unsigned int run_num)
index cfda48bef1d72d954a81a71b44bb18a4c9f51efc..201a3555f09a2053fa2e30176faaae25e7003555 100644 (file)
@@ -97,22 +97,32 @@ static void print_summary(void)
 static void block_threads(pthread_t *w,
                          pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
 {
-       cpu_set_t cpuset;
+       cpu_set_t *cpuset;
        unsigned int i;
-
+       size_t size;
+       int nrcpus = perf_cpu_map__nr(cpu);
        threads_starting = params.nthreads;
 
+       cpuset = CPU_ALLOC(nrcpus);
+       BUG_ON(!cpuset);
+       size = CPU_ALLOC_SIZE(nrcpus);
+
        /* create and block all threads */
        for (i = 0; i < params.nthreads; i++) {
-               CPU_ZERO(&cpuset);
-               CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+               CPU_ZERO_S(size, cpuset);
+               CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
 
-               if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+               if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+               }
 
-               if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
+               if (pthread_create(&w[i], &thread_attr, workerfn, NULL)) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_create");
+               }
        }
+       CPU_FREE(cpuset);
 }
 
 static void toggle_done(int sig __maybe_unused,
index f2640179ada9edb617bbbd7ad4fab7691f224004..44e1f8a44087e35d60c732bea6aef7146fee3111 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/numa.h>
 #include <linux/zalloc.h>
 
+#include "../util/header.h"
 #include <numa.h>
 #include <numaif.h>
 
@@ -54,7 +55,7 @@
 
 struct thread_data {
        int                     curr_cpu;
-       cpu_set_t               bind_cpumask;
+       cpu_set_t               *bind_cpumask;
        int                     bind_node;
        u8                      *process_data;
        int                     process_nr;
@@ -266,71 +267,115 @@ static bool node_has_cpus(int node)
        return ret;
 }
 
-static cpu_set_t bind_to_cpu(int target_cpu)
+static cpu_set_t *bind_to_cpu(int target_cpu)
 {
-       cpu_set_t orig_mask, mask;
-       int ret;
+       int nrcpus = numa_num_possible_cpus();
+       cpu_set_t *orig_mask, *mask;
+       size_t size;
 
-       ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
-       BUG_ON(ret);
+       orig_mask = CPU_ALLOC(nrcpus);
+       BUG_ON(!orig_mask);
+       size = CPU_ALLOC_SIZE(nrcpus);
+       CPU_ZERO_S(size, orig_mask);
+
+       if (sched_getaffinity(0, size, orig_mask))
+               goto err_out;
+
+       mask = CPU_ALLOC(nrcpus);
+       if (!mask)
+               goto err_out;
 
-       CPU_ZERO(&mask);
+       CPU_ZERO_S(size, mask);
 
        if (target_cpu == -1) {
                int cpu;
 
                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
-                       CPU_SET(cpu, &mask);
+                       CPU_SET_S(cpu, size, mask);
        } else {
-               BUG_ON(target_cpu < 0 || target_cpu >= g->p.nr_cpus);
-               CPU_SET(target_cpu, &mask);
+               if (target_cpu < 0 || target_cpu >= g->p.nr_cpus)
+                       goto err;
+
+               CPU_SET_S(target_cpu, size, mask);
        }
 
-       ret = sched_setaffinity(0, sizeof(mask), &mask);
-       BUG_ON(ret);
+       if (sched_setaffinity(0, size, mask))
+               goto err;
 
        return orig_mask;
+
+err:
+       CPU_FREE(mask);
+err_out:
+       CPU_FREE(orig_mask);
+
+       /* BUG_ON: allocating or applying orig_mask/mask failed */
+       BUG_ON(-1);
 }
 
-static cpu_set_t bind_to_node(int target_node)
+static cpu_set_t *bind_to_node(int target_node)
 {
-       cpu_set_t orig_mask, mask;
+       int nrcpus = numa_num_possible_cpus();
+       size_t size;
+       cpu_set_t *orig_mask, *mask;
        int cpu;
-       int ret;
 
-       ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
-       BUG_ON(ret);
+       orig_mask = CPU_ALLOC(nrcpus);
+       BUG_ON(!orig_mask);
+       size = CPU_ALLOC_SIZE(nrcpus);
+       CPU_ZERO_S(size, orig_mask);
 
-       CPU_ZERO(&mask);
+       if (sched_getaffinity(0, size, orig_mask))
+               goto err_out;
+
+       mask = CPU_ALLOC(nrcpus);
+       if (!mask)
+               goto err_out;
+
+       CPU_ZERO_S(size, mask);
 
        if (target_node == NUMA_NO_NODE) {
                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
-                       CPU_SET(cpu, &mask);
+                       CPU_SET_S(cpu, size, mask);
        } else {
                struct bitmask *cpumask = numa_allocate_cpumask();
 
-               BUG_ON(!cpumask);
+               if (!cpumask)
+                       goto err;
+
                if (!numa_node_to_cpus(target_node, cpumask)) {
                        for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
                                if (numa_bitmask_isbitset(cpumask, cpu))
-                                       CPU_SET(cpu, &mask);
+                                       CPU_SET_S(cpu, size, mask);
                        }
                }
                numa_free_cpumask(cpumask);
        }
 
-       ret = sched_setaffinity(0, sizeof(mask), &mask);
-       BUG_ON(ret);
+       if (sched_setaffinity(0, size, mask))
+               goto err;
 
        return orig_mask;
+
+err:
+       CPU_FREE(mask);
+err_out:
+       CPU_FREE(orig_mask);
+
+       /* BUG_ON: allocating or applying orig_mask/mask failed */
+       BUG_ON(-1);
 }
 
-static void bind_to_cpumask(cpu_set_t mask)
+static void bind_to_cpumask(cpu_set_t *mask)
 {
        int ret;
+       size_t size = CPU_ALLOC_SIZE(numa_num_possible_cpus());
 
-       ret = sched_setaffinity(0, sizeof(mask), &mask);
-       BUG_ON(ret);
+       ret = sched_setaffinity(0, size, mask);
+       if (ret) {
+               CPU_FREE(mask);
+               BUG_ON(ret);
+       }
 }
 
 static void mempol_restore(void)
@@ -376,7 +421,7 @@ do {                                                        \
 static u8 *alloc_data(ssize_t bytes0, int map_flags,
                      int init_zero, int init_cpu0, int thp, int init_random)
 {
-       cpu_set_t orig_mask;
+       cpu_set_t *orig_mask = NULL;
        ssize_t bytes;
        u8 *buf;
        int ret;
@@ -434,6 +479,7 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
        /* Restore affinity: */
        if (init_cpu0) {
                bind_to_cpumask(orig_mask);
+               CPU_FREE(orig_mask);
                mempol_restore();
        }
 
@@ -585,10 +631,16 @@ static int parse_setup_cpu_list(void)
                        return -1;
                }
 
+               if (is_cpu_online(bind_cpu_0) != 1 || is_cpu_online(bind_cpu_1) != 1) {
+                       printf("\nTest not applicable, bind_cpu_0 or bind_cpu_1 is offline\n");
+                       return -1;
+               }
+
                BUG_ON(bind_cpu_0 < 0 || bind_cpu_1 < 0);
                BUG_ON(bind_cpu_0 > bind_cpu_1);
 
                for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) {
+                       size_t size = CPU_ALLOC_SIZE(g->p.nr_cpus);
                        int i;
 
                        for (i = 0; i < mul; i++) {
@@ -608,10 +660,15 @@ static int parse_setup_cpu_list(void)
                                        tprintf("%2d", bind_cpu);
                                }
 
-                               CPU_ZERO(&td->bind_cpumask);
+                               td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus);
+                               BUG_ON(!td->bind_cpumask);
+                               CPU_ZERO_S(size, td->bind_cpumask);
                                for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) {
-                                       BUG_ON(cpu < 0 || cpu >= g->p.nr_cpus);
-                                       CPU_SET(cpu, &td->bind_cpumask);
+                                       if (cpu < 0 || cpu >= g->p.nr_cpus) {
+                                               CPU_FREE(td->bind_cpumask);
+                                               BUG_ON(-1);
+                                       }
+                                       CPU_SET_S(cpu, size, td->bind_cpumask);
                                }
                                t++;
                        }
@@ -752,8 +809,6 @@ static int parse_nodes_opt(const struct option *opt __maybe_unused,
        return parse_node_list(arg);
 }
 
-#define BIT(x) (1ul << x)
-
 static inline uint32_t lfsr_32(uint32_t lfsr)
 {
        const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31);
@@ -1241,7 +1296,7 @@ static void *worker_thread(void *__tdata)
                 * by migrating to CPU#0:
                 */
                if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) {
-                       cpu_set_t orig_mask;
+                       cpu_set_t *orig_mask;
                        int target_cpu;
                        int this_cpu;
 
@@ -1265,6 +1320,7 @@ static void *worker_thread(void *__tdata)
                                printf(" (injecting perturbalance, moved to CPU#%d)\n", target_cpu);
 
                        bind_to_cpumask(orig_mask);
+                       CPU_FREE(orig_mask);
                }
 
                if (details >= 3) {
@@ -1398,21 +1454,31 @@ static void init_thread_data(void)
 
        for (t = 0; t < g->p.nr_tasks; t++) {
                struct thread_data *td = g->threads + t;
+               size_t cpuset_size = CPU_ALLOC_SIZE(g->p.nr_cpus);
                int cpu;
 
                /* Allow all nodes by default: */
                td->bind_node = NUMA_NO_NODE;
 
                /* Allow all CPUs by default: */
-               CPU_ZERO(&td->bind_cpumask);
+               td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus);
+               BUG_ON(!td->bind_cpumask);
+               CPU_ZERO_S(cpuset_size, td->bind_cpumask);
                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
-                       CPU_SET(cpu, &td->bind_cpumask);
+                       CPU_SET_S(cpu, cpuset_size, td->bind_cpumask);
        }
 }
 
 static void deinit_thread_data(void)
 {
        ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
+       int t;
+
+       /* Free the bind_cpumask allocated for thread_data */
+       for (t = 0; t < g->p.nr_tasks; t++) {
+               struct thread_data *td = g->threads + t;
+               CPU_FREE(td->bind_cpumask);
+       }
 
        free_data(g->threads, size);
 }
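
Beyond the allocation change, bind_to_cpu()/bind_to_node() now hand ownership of the saved mask to the caller, which is why alloc_data() and worker_thread() above gained CPU_FREE() calls after restoring affinity. A reduced sketch of that save/restore contract (the CPU count is illustrative):

#define _GNU_SOURCE
#include <sched.h>
#include <stdlib.h>

/* Save the current affinity; on success the caller owns the returned mask. */
static cpu_set_t *save_affinity(int nrcpus, size_t *sizep)
{
	cpu_set_t *orig = CPU_ALLOC(nrcpus);
	size_t size = CPU_ALLOC_SIZE(nrcpus);

	if (!orig)
		return NULL;
	CPU_ZERO_S(size, orig);
	if (sched_getaffinity(0, size, orig)) {
		CPU_FREE(orig);
		return NULL;
	}
	*sizep = size;
	return orig;
}

int main(void)
{
	size_t size;
	cpu_set_t *orig = save_affinity(2048, &size);

	if (!orig)
		return EXIT_FAILURE;
	/* ... rebind to a CPU or node and do the work here ... */
	sched_setaffinity(0, size, orig);      /* restore the original mask */
	CPU_FREE(orig);                        /* the caller frees, as above */
	return EXIT_SUCCESS;
}
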
index ad9ce1bfffa15ea3f683b2c17b03c347f55e1b39..7de07bb16d235fd7e76beb460071368ce121f751 100644 (file)
@@ -301,7 +301,7 @@ static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
 
 static int set_tracing_cpu(struct perf_ftrace *ftrace)
 {
-       struct perf_cpu_map *cpumap = ftrace->evlist->core.cpus;
+       struct perf_cpu_map *cpumap = ftrace->evlist->core.user_requested_cpus;
 
        if (!target__has_cpu(&ftrace->target))
                return 0;
index 0b4abed555d8fa954960eba0772539f4485cceff..069825c48d404bc4dbe4901f0f45b8d97f683ff9 100644 (file)
@@ -987,10 +987,13 @@ static int record__thread_data_init_maps(struct record_thread *thread_data, stru
        int m, tm, nr_mmaps = evlist->core.nr_mmaps;
        struct mmap *mmap = evlist->mmap;
        struct mmap *overwrite_mmap = evlist->overwrite_mmap;
-       struct perf_cpu_map *cpus = evlist->core.cpus;
+       struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
 
-       thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
-                                             thread_data->mask->maps.nbits);
+       if (cpu_map__is_dummy(cpus))
+               thread_data->nr_mmaps = nr_mmaps;
+       else
+               thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
+                                                     thread_data->mask->maps.nbits);
        if (mmap) {
                thread_data->maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
                if (!thread_data->maps)
@@ -1007,16 +1010,17 @@ static int record__thread_data_init_maps(struct record_thread *thread_data, stru
                 thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps);
 
        for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) {
-               if (test_bit(cpus->map[m].cpu, thread_data->mask->maps.bits)) {
+               if (cpu_map__is_dummy(cpus) ||
+                   test_bit(cpus->map[m].cpu, thread_data->mask->maps.bits)) {
                        if (thread_data->maps) {
                                thread_data->maps[tm] = &mmap[m];
                                pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n",
-                                         thread_data, cpus->map[m].cpu, tm, m);
+                                         thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
                        }
                        if (thread_data->overwrite_maps) {
                                thread_data->overwrite_maps[tm] = &overwrite_mmap[m];
                                pr_debug2("thread_data[%p]: cpu%d: ow_maps[%d] -> ow_mmap[%d]\n",
-                                         thread_data, cpus->map[m].cpu, tm, m);
+                                         thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
                        }
                        tm++;
                }
@@ -1881,7 +1885,7 @@ static int record__synthesize(struct record *rec, bool tail)
                return err;
        }
 
-       err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
+       err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.user_requested_cpus,
                                             process_synthesized_event, NULL);
        if (err < 0) {
                pr_err("Couldn't synthesize cpu map.\n");
@@ -3329,6 +3333,9 @@ static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_c
 {
        int c;
 
+       if (cpu_map__is_dummy(cpus))
+               return;
+
        for (c = 0; c < cpus->nr; c++)
                set_bit(cpus->map[c].cpu, mask->bits);
 }
@@ -3675,11 +3682,16 @@ static int record__init_thread_default_masks(struct record *rec, struct perf_cpu
 static int record__init_thread_masks(struct record *rec)
 {
        int ret = 0;
-       struct perf_cpu_map *cpus = rec->evlist->core.cpus;
+       struct perf_cpu_map *cpus = rec->evlist->core.user_requested_cpus;
 
        if (!record__threads_enabled(rec))
                return record__init_thread_default_masks(rec, cpus);
 
+       if (cpu_map__is_dummy(cpus)) {
+               pr_err("--per-thread option is mutually exclusive to parallel streaming mode.\n");
+               return -EINVAL;
+       }
+
        switch (rec->opts.threads_spec) {
        case THREAD_SPEC__CPU:
                ret = record__init_thread_cpu_masks(rec, cpus);
index 4ee40de698a47fc952d97dd16d90ff3aeb0ed455..a96f106dc93a08b32177d3fc51d8e97da3076432 100644 (file)
@@ -804,7 +804,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
        if (group)
                evlist__set_leader(evsel_list);
 
-       if (!cpu_map__is_dummy(evsel_list->core.cpus)) {
+       if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
                if (affinity__setup(&saved_affinity) < 0)
                        return -1;
                affinity = &saved_affinity;
@@ -1458,7 +1458,7 @@ static int perf_stat_init_aggr_mode(void)
        aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode);
 
        if (get_id) {
-               stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.cpus,
+               stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
                                                         get_id, /*data=*/NULL);
                if (!stat_config.aggr_map) {
                        pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
@@ -1472,7 +1472,10 @@ static int perf_stat_init_aggr_mode(void)
         * taking the highest cpu number to be the size of
         * the aggregation translate cpumap.
         */
-       nr = perf_cpu_map__max(evsel_list->core.cpus).cpu;
+       if (evsel_list->core.user_requested_cpus)
+               nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
+       else
+               nr = 0;
        stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1);
        return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
 }
@@ -1627,7 +1630,7 @@ static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
        if (!get_id)
                return 0;
 
-       stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.cpus, get_id, env);
+       stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, get_id, env);
        if (!stat_config.aggr_map) {
                pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
                return -1;
index 9b08e44a31d92aaa441cbd7769e102c45d3661fe..fd8fd913c533cfe73b1e517d120fc1e95d0ba18b 100644 (file)
@@ -1021,7 +1021,7 @@ static int perf_top__start_counters(struct perf_top *top)
 
        evlist__for_each_entry(evlist, counter) {
 try_again:
-               if (evsel__open(counter, top->evlist->core.cpus,
+               if (evsel__open(counter, top->evlist->core.user_requested_cpus,
                                     top->evlist->core.threads) < 0) {
 
                        /*
index 2f6b67189b426c33b07f66f6cd6209743afc297b..0170cb0819d6ad530695f75b0af4439725dec69b 100644 (file)
@@ -55,6 +55,7 @@ struct cmd_struct {
 };
 
 static struct cmd_struct commands[] = {
+       { "archive",    NULL,   0 },
        { "buildid-cache", cmd_buildid_cache, 0 },
        { "buildid-list", cmd_buildid_list, 0 },
        { "config",     cmd_config,     0 },
@@ -62,6 +63,7 @@ static struct cmd_struct commands[] = {
        { "diff",       cmd_diff,       0 },
        { "evlist",     cmd_evlist,     0 },
        { "help",       cmd_help,       0 },
+       { "iostat",     NULL,   0 },
        { "kallsyms",   cmd_kallsyms,   0 },
        { "list",       cmd_list,       0 },
        { "record",     cmd_record,     0 },
@@ -360,6 +362,8 @@ static void handle_internal_command(int argc, const char **argv)
 
        for (i = 0; i < ARRAY_SIZE(commands); i++) {
                struct cmd_struct *p = commands+i;
+               if (p->fn == NULL)
+                       continue;
                if (strcmp(p->cmd, cmd))
                        continue;
                exit(run_builtin(p, argc, argv));
@@ -434,7 +438,7 @@ void pthread__unblock_sigwinch(void)
 static int libperf_print(enum libperf_print_level level,
                         const char *fmt, va_list ap)
 {
-       return eprintf(level, verbose, fmt, ap);
+       return veprintf(level, verbose, fmt, ap);
 }
 
 int main(int argc, const char **argv)
index 461848c7f57dca3dc7e61f0df5153309d97a4419..bba68a6d45156183c91d2ba01c6bfee62d26e28d 100755 (executable)
@@ -34,7 +34,7 @@ def main():
             if not isinstance(event, perf.sample_event):
                 continue
 
-            print "time %u prev_comm=%s prev_pid=%d prev_prio=%d prev_state=0x%x ==> next_comm=%s next_pid=%d next_prio=%d" % (
+            print("time %u prev_comm=%s prev_pid=%d prev_prio=%d prev_state=0x%x ==> next_comm=%s next_pid=%d next_prio=%d" % (
                    event.sample_time,
                    event.prev_comm,
                    event.prev_pid,
@@ -42,7 +42,7 @@ def main():
                    event.prev_state,
                    event.next_comm,
                    event.next_pid,
-                   event.next_prio)
+                   event.next_prio))
 
 if __name__ == '__main__':
     main()
index 2dab2d2620608b5b5bf458d76144aa72253321d5..afdca7f2959f07d87716c26508178bb6647bd9cf 100644 (file)
@@ -122,7 +122,7 @@ NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thr
        }
 
        err = unwind__get_entries(unwind_entry, &cnt, thread,
-                                 &sample, MAX_STACK);
+                                 &sample, MAX_STACK, false);
        if (err)
                pr_debug("unwind failed\n");
        else if (cnt != MAX_STACK) {
index d12d0ad8180107f721426b6e437c34f4cb944de8..cc6df49a65a18fd767117b808e2a86d425c9ac25 100644 (file)
        }                                       \
 }
 
+static int test__tsc_is_supported(struct test_suite *test __maybe_unused,
+                                 int subtest __maybe_unused)
+{
+       if (!TSC_IS_SUPPORTED) {
+               pr_debug("Test not supported on this architecture\n");
+               return TEST_SKIP;
+       }
+
+       return TEST_OK;
+}
+
 /**
  * test__perf_time_to_tsc - test converting perf time to TSC.
  *
@@ -70,7 +81,7 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
        struct perf_cpu_map *cpus = NULL;
        struct evlist *evlist = NULL;
        struct evsel *evsel = NULL;
-       int err = -1, ret, i;
+       int err = TEST_FAIL, ret, i;
        const char *comm1, *comm2;
        struct perf_tsc_conversion tc;
        struct perf_event_mmap_page *pc;
@@ -79,10 +90,6 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
        u64 test_time, comm1_time = 0, comm2_time = 0;
        struct mmap *md;
 
-       if (!TSC_IS_SUPPORTED) {
-               pr_debug("Test not supported on this architecture");
-               return TEST_SKIP;
-       }
 
        threads = thread_map__new(-1, getpid(), UINT_MAX);
        CHECK_NOT_NULL__(threads);
@@ -124,8 +131,8 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
        ret = perf_read_tsc_conversion(pc, &tc);
        if (ret) {
                if (ret == -EOPNOTSUPP) {
-                       fprintf(stderr, " (not supported)");
-                       return 0;
+                       pr_debug("perf_read_tsc_conversion is not supported in current kernel\n");
+                       err = TEST_SKIP;
                }
                goto out_err;
        }
@@ -191,7 +198,7 @@ next_event:
            test_tsc >= comm2_tsc)
                goto out_err;
 
-       err = 0;
+       err = TEST_OK;
 
 out_err:
        evlist__delete(evlist);
@@ -200,4 +207,15 @@ out_err:
        return err;
 }
 
-DEFINE_SUITE("Convert perf time to TSC", perf_time_to_tsc);
+static struct test_case time_to_tsc_tests[] = {
+       TEST_CASE_REASON("TSC support", tsc_is_supported,
+                        "This architecture does not support"),
+       TEST_CASE_REASON("Perf time to TSC", perf_time_to_tsc,
+                        "perf_read_tsc_conversion is not supported"),
+       { .name = NULL, }
+};
+
+struct test_suite suite__perf_time_to_tsc = {
+       .desc = "Convert perf time to TSC",
+       .test_cases = time_to_tsc_tests,
+};
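
The DEFINE_SUITE() replacement above splits one test into a NULL-terminated table of cases, so the architecture check can report SKIP on its own without masking a real failure in the conversion test. The shape of that pattern, reduced to plain C (this mirrors the structure only, not perf's actual test API):

#include <stdio.h>

enum result { RES_OK = 0, RES_SKIP = 1, RES_FAIL = 2 };

struct test_case {
	const char *name;
	enum result (*run)(void);
};

static enum result check_support(void) { return RES_SKIP; } /* e.g. no TSC */
static enum result convert_test(void)  { return RES_OK; }

static const struct test_case cases[] = {
	{ "support check", check_support },
	{ "conversion",    convert_test },
	{ NULL, NULL },                 /* sentinel ends the table */
};

int main(void)
{
	for (const struct test_case *tc = cases; tc->name; tc++)
		printf("%-14s: %d\n", tc->name, tc->run());
	return 0;
}
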
index 8ef26d89ef495f6dbff49681961899e099dde544..6f85f5d957efea06c7fc19c78181feda9e57aead 100644 (file)
@@ -366,6 +366,7 @@ struct ucred {
 #define SOL_XDP                283
 #define SOL_MPTCP      284
 #define SOL_MCTP       285
+#define SOL_SMC                286
 
 /* IPX options */
 #define IPX_TYPE       1
index e4c641b240df4dcaeb52d8d822f9243a64b777f9..82cc396ef516c45c75163538502ecdcba94b7d74 100644 (file)
@@ -2047,6 +2047,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
        objdump_process.argv = objdump_argv;
        objdump_process.out = -1;
        objdump_process.err = -1;
+       objdump_process.no_stderr = 1;
        if (start_command(&objdump_process)) {
                pr_err("Failure starting to run %s\n", command);
                err = -1;
index 2242a885fbd73387ffd347deba3bceb4ac74a95a..4940be4a0569cd441cee2c265433a2a06181c8fa 100644 (file)
@@ -53,7 +53,7 @@ u64 get_leaf_frame_caller_aarch64(struct perf_sample *sample, struct thread *thr
                sample->user_regs.cache_regs[PERF_REG_ARM64_SP] = 0;
        }
 
-       ret = unwind__get_entries(add_entry, &entries, thread, sample, 2);
+       ret = unwind__get_entries(add_entry, &entries, thread, sample, 2, true);
        sample->user_regs = old_regs;
 
        if (ret || entries.length != 2)
index 9e48652662d4ed4441a4257316875b9110e999bd..df1c5bbbaa0dd592b51afe28fa54fd14acb1330f 100644 (file)
@@ -174,7 +174,7 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
        mp->idx = idx;
 
        if (per_cpu) {
-               mp->cpu = perf_cpu_map__cpu(evlist->core.cpus, idx);
+               mp->cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, idx);
                if (evlist->core.threads)
                        mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
                else
index 4f4d3aaff37c7f7d4ba70c98f80148690d50d56d..7a4297d8fd2ce9250caa848475aa0bf9b68e79c8 100644 (file)
@@ -38,7 +38,7 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
 
        /* don't need to set cpu filter for system-wide mode */
        if (ftrace->target.cpu_list) {
-               ncpus = perf_cpu_map__nr(ftrace->evlist->core.cpus);
+               ncpus = perf_cpu_map__nr(ftrace->evlist->core.user_requested_cpus);
                bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
        }
 
@@ -63,7 +63,7 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
                fd = bpf_map__fd(skel->maps.cpu_filter);
 
                for (i = 0; i < ncpus; i++) {
-                       cpu = perf_cpu_map__cpu(ftrace->evlist->core.cpus, i).cpu;
+                       cpu = perf_cpu_map__cpu(ftrace->evlist->core.user_requested_cpus, i).cpu;
                        bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
                }
        }
index 9bb79e0499575f11391d267a781dbb54d8dcd5d3..52ea004ba01e520d095e811c077011ff6c84012f 100644 (file)
@@ -440,7 +440,7 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
        bool has_imm = false;
 
        // See explanation in evlist__close()
-       if (!cpu_map__is_dummy(evlist->core.cpus)) {
+       if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
                if (affinity__setup(&saved_affinity) < 0)
                        return;
                affinity = &saved_affinity;
@@ -500,7 +500,7 @@ static void __evlist__enable(struct evlist *evlist, char *evsel_name)
        struct affinity saved_affinity, *affinity = NULL;
 
        // See explanation in evlist__close()
-       if (!cpu_map__is_dummy(evlist->core.cpus)) {
+       if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
                if (affinity__setup(&saved_affinity) < 0)
                        return;
                affinity = &saved_affinity;
@@ -565,7 +565,7 @@ static int evlist__enable_event_cpu(struct evlist *evlist, struct evsel *evsel,
 static int evlist__enable_event_thread(struct evlist *evlist, struct evsel *evsel, int thread)
 {
        int cpu;
-       int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
+       int nr_cpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
 
        if (!evsel->core.fd)
                return -EINVAL;
@@ -580,7 +580,7 @@ static int evlist__enable_event_thread(struct evlist *evlist, struct evsel *evse
 
 int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
 {
-       bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);
+       bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
 
        if (per_cpu_mmaps)
                return evlist__enable_event_cpu(evlist, evsel, idx);
@@ -1301,10 +1301,11 @@ void evlist__close(struct evlist *evlist)
        struct affinity affinity;
 
        /*
-        * With perf record core.cpus is usually NULL.
+        * With perf record core.user_requested_cpus is usually NULL.
         * Use the old method to handle this for now.
         */
-       if (!evlist->core.cpus || cpu_map__is_dummy(evlist->core.cpus)) {
+       if (!evlist->core.user_requested_cpus ||
+           cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
                evlist__for_each_entry_reverse(evlist, evsel)
                        evsel__close(evsel);
                return;
@@ -1330,7 +1331,6 @@ static int evlist__create_syswide_maps(struct evlist *evlist)
 {
        struct perf_cpu_map *cpus;
        struct perf_thread_map *threads;
-       int err = -ENOMEM;
 
        /*
         * Try reading /sys/devices/system/cpu/online to get
@@ -1355,7 +1355,7 @@ static int evlist__create_syswide_maps(struct evlist *evlist)
 out_put:
        perf_cpu_map__put(cpus);
 out:
-       return err;
+       return -ENOMEM;
 }
 
 int evlist__open(struct evlist *evlist)
@@ -1367,7 +1367,7 @@ int evlist__open(struct evlist *evlist)
         * Default: one fd per CPU, all threads, aka systemwide
         * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
         */
-       if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
+       if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) {
                err = evlist__create_syswide_maps(evlist);
                if (err < 0)
                        goto out_err;
index 3c20b126d60d869aa136b24d4b01c93d6302938e..aeb09c2887162d169c4c8f0d4718a59df8051678 100644 (file)
@@ -75,7 +75,7 @@ void hashmap__clear(struct hashmap *map)
 
 void hashmap__free(struct hashmap *map)
 {
-       if (!map)
+       if (IS_ERR_OR_NULL(map))
                return;
 
        hashmap__clear(map);
@@ -238,4 +238,3 @@ bool hashmap__delete(struct hashmap *map, const void *key,
 
        return true;
 }
-
index d546ff724dbe250c78784758959c3bfe630597ee..a27132e5a5efee55a2936ecb14b0044a13d57c7b 100644 (file)
@@ -983,6 +983,57 @@ static int write_dir_format(struct feat_fd *ff,
        return do_write(ff, &data->dir.version, sizeof(data->dir.version));
 }
 
+/*
+ * Check whether a CPU is online
+ *
+ * Returns:
+ *     1 -> if CPU is online
+ *     0 -> if CPU is offline
+ *    -1 -> error case
+ */
+int is_cpu_online(unsigned int cpu)
+{
+       char *str;
+       size_t len;
+       char buf[256];
+       int status = -1;
+       struct stat statbuf;
+
+       snprintf(buf, sizeof(buf),
+               "/sys/devices/system/cpu/cpu%u", cpu);
+       if (stat(buf, &statbuf) != 0)
+               return 0;
+
+       /*
+        * Check if the /sys/devices/system/cpu/cpuX/online file
+        * exists. In some cases cpu0 won't have an online file, since
+        * it is generally not expected to be turned off. In kernels
+        * built without CONFIG_HOTPLUG_CPU, the file won't exist at all.
+        */
+       snprintf(buf, sizeof(buf),
+               "/sys/devices/system/cpu/cpu%u/online", cpu);
+       if (stat(buf, &statbuf) != 0)
+               return 1;
+
+       /*
+        * Read the online file using sysfs__read_str.
+        * If the open or read fails, return -1.
+        * If it succeeds, return the value read from the
+        * file, which sysfs__read_str stores in "str".
+        */
+       snprintf(buf, sizeof(buf),
+               "devices/system/cpu/cpu%u/online", cpu);
+
+       if (sysfs__read_str(buf, &str, &len) < 0)
+               return status;
+
+       status = atoi(str);
+
+       free(str);
+       return status;
+}
+
 #ifdef HAVE_LIBBPF_SUPPORT
 static int write_bpf_prog_info(struct feat_fd *ff,
                               struct evlist *evlist __maybe_unused)
index c9e3265832d92c1d44a1833e4e2880d4a1bf8c60..0eb4bc29a5a46c2862f49ec4058712a644f3565b 100644 (file)
@@ -158,6 +158,7 @@ int do_write(struct feat_fd *fd, const void *buf, size_t size);
 int write_padded(struct feat_fd *fd, const void *bf,
                 size_t count, size_t count_aligned);
 
+int is_cpu_online(unsigned int cpu);
 /*
  * arch specific callback
  */
index b80048546451359291fc886b6322d4f8c35cb600..95391236f5f6a9103a233d0e8feffb30b49e995a 100644 (file)
@@ -2987,7 +2987,7 @@ static int thread__resolve_callchain_unwind(struct thread *thread,
                return 0;
 
        return unwind__get_entries(unwind_entry, cursor,
-                                  thread, sample, max_stack);
+                                  thread, sample, max_stack, false);
 }
 
 int thread__resolve_callchain(struct thread *thread,
index 24997925ae00d66a95c58b2afdac3d54894661f3..dd84fed698a3bedd087a61ea9aa478ee15995fff 100644 (file)
@@ -1523,7 +1523,9 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
        bool use_uncore_alias;
        LIST_HEAD(config_terms);
 
-       if (verbose > 1) {
+       pmu = parse_state->fake_pmu ?: perf_pmu__find(name);
+
+       if (verbose > 1 && !(pmu && pmu->selectable)) {
                fprintf(stderr, "Attempting to add event pmu '%s' with '",
                        name);
                if (head_config) {
@@ -1536,7 +1538,6 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
                fprintf(stderr, "' that may result in non-fatal errors\n");
        }
 
-       pmu = parse_state->fake_pmu ?: perf_pmu__find(name);
        if (!pmu) {
                char *err_str;
 
index 007a64681416bfce3f2d3e8f5d78d574b410c77f..5b09ecbb05dc2788cebc266a63a840e7aa36da8c 100644 (file)
@@ -106,7 +106,7 @@ void evlist__config(struct evlist *evlist, struct record_opts *opts, struct call
        if (opts->group)
                evlist__set_leader(evlist);
 
-       if (perf_cpu_map__cpu(evlist->core.cpus, 0).cpu < 0)
+       if (perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0).cpu < 0)
                opts->no_inherit = true;
 
        use_comm_exec = perf_can_comm_exec();
@@ -244,7 +244,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
 
        evsel = evlist__last(temp_evlist);
 
-       if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
+       if (!evlist || perf_cpu_map__empty(evlist->core.user_requested_cpus)) {
                struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
 
                if (cpus)
@@ -252,7 +252,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
 
                perf_cpu_map__put(cpus);
        } else {
-               cpu = perf_cpu_map__cpu(evlist->core.cpus, 0);
+               cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0);
        }
 
        while (1) {
index 3b8dfe603e50b1a51f8b58a1e8c23716728aee48..45a30040ec8d04c926924d4db05e951a740e5a2c 100644 (file)
@@ -2095,6 +2095,7 @@ prefetch_event(char *buf, u64 head, size_t mmap_size,
               bool needs_swap, union perf_event *error)
 {
        union perf_event *event;
+       u16 event_size;
 
        /*
         * Ensure we have enough space remaining to read
@@ -2107,15 +2108,23 @@ prefetch_event(char *buf, u64 head, size_t mmap_size,
        if (needs_swap)
                perf_event_header__bswap(&event->header);
 
-       if (head + event->header.size <= mmap_size)
+       event_size = event->header.size;
+       if (head + event_size <= mmap_size)
                return event;
 
        /* We're not fetching the event so swap back again */
        if (needs_swap)
                perf_event_header__bswap(&event->header);
 
-       pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx:"
-                " fuzzed or compressed perf.data?\n",__func__, head, event->header.size, mmap_size);
+       /* Check if the event fits into the next mmapped buf. */
+       if (event_size <= mmap_size - head % page_size) {
+               /* Remap buf and fetch again. */
+               return NULL;
+       }
+
+       /* Invalid input. Event size should never exceed mmap_size. */
+       pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
+                " fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size);
 
        return error;
 }
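
The reworked prefetch_event() above distinguishes three outcomes instead of treating every overlong event as fuzzed input: the event fits the current window, it would fit after the window is remapped forward, or it exceeds any window and is therefore invalid. The decision in isolation (names and the numbers in main() are illustrative; perf remaps its buffer at a page boundary, so head % page_size bytes are lost to alignment after a remap):

#include <stddef.h>
#include <stdint.h>

enum fetch { FETCH_OK, FETCH_REMAP, FETCH_ERROR };

static enum fetch classify(uint64_t head, uint16_t event_size,
			   size_t mmap_size, size_t page_size)
{
	if (head + event_size <= mmap_size)
		return FETCH_OK;     /* fully inside the current window */
	if (event_size <= mmap_size - head % page_size)
		return FETCH_REMAP;  /* fits once the window slides forward */
	return FETCH_ERROR;          /* exceeds any window: corrupt input */
}

int main(void)
{
	/* a 200-byte event at offset 8000 of an 8192-byte, 4096-page window */
	return classify(8000, 200, 8192, 4096) == FETCH_REMAP ? 0 : 1;
}
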
index 483f05004e682081be7b87c8dcb20c4e244d9c68..c255a2c90cd672b1ecfa36eaa0883045f34175f6 100644 (file)
@@ -1,12 +1,14 @@
-from os import getenv
+from os import getenv, path
 from subprocess import Popen, PIPE
 from re import sub
 
 cc = getenv("CC")
 cc_is_clang = b"clang version" in Popen([cc.split()[0], "-v"], stderr=PIPE).stderr.readline()
+src_feature_tests = getenv('srctree') + '/tools/build/feature'
 
 def clang_has_option(option):
-    return [o for o in Popen([cc, option], stderr=PIPE).stderr.readlines() if b"unknown argument" in o] == [ ]
+    cc_output = Popen([cc, option, path.join(src_feature_tests, "test-hello.c")], stderr=PIPE).stderr.readlines()
+    return [o for o in cc_output if ((b"unknown argument" in o) or (b"is not supported" in o))] == [ ]
 
 if cc_is_clang:
     from distutils.sysconfig import get_config_vars
@@ -23,6 +25,8 @@ if cc_is_clang:
             vars[var] = sub("-fstack-protector-strong", "", vars[var])
         if not clang_has_option("-fno-semantic-interposition"):
             vars[var] = sub("-fno-semantic-interposition", "", vars[var])
+        if not clang_has_option("-ffat-lto-objects"):
+            vars[var] = sub("-ffat-lto-objects", "", vars[var])
 
 from distutils.core import setup, Extension
 
index 748371ac22bec8d942e6716d998bfa7f83d3ee7d..388846f17bc13fb93a1be77c118d63e67857fff7 100644 (file)
@@ -114,7 +114,8 @@ int evlist__start_sb_thread(struct evlist *evlist, struct target *target)
        }
 
        evlist__for_each_entry(evlist, counter) {
-               if (evsel__open(counter, evlist->core.cpus, evlist->core.threads) < 0)
+               if (evsel__open(counter, evlist->core.user_requested_cpus,
+                               evlist->core.threads) < 0)
                        goto out_delete_evlist;
        }
 
index 9cbe351b141fd09a91d454d5037a9221c741f858..138e3ab9d638d6d967b9f0a0f6766237ec904db1 100644 (file)
@@ -929,7 +929,7 @@ static void print_no_aggr_metric(struct perf_stat_config *config,
        int all_idx;
        struct perf_cpu cpu;
 
-       perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.cpus) {
+       perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.user_requested_cpus) {
                struct evsel *counter;
                bool first = true;
 
index ee6f0348121514220df185ec7e19c4afb548850e..817a2de264b46956ff9073d6179be2d48c6eeffe 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <errno.h>
+#include <linux/err.h>
 #include <inttypes.h>
 #include <math.h>
 #include <string.h>
@@ -311,7 +312,7 @@ static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
 
        if (!mask) {
                mask = hashmap__new(pkg_id_hash, pkg_id_equal, NULL);
-               if (!mask)
+               if (IS_ERR(mask))
                        return -ENOMEM;
 
                counter->per_pkg_mask = mask;
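
The IS_ERR() switch above is needed because hashmap__new() signals failure with an error-encoded pointer rather than NULL, so the old !mask test could never fire and the error pointer would be dereferenced later. The macros below re-create the linux/err.h convention in plain userspace C purely to illustrate why a NULL check misses such a pointer:

#include <errno.h>
#include <stdio.h>

/* Userspace re-creation of the linux/err.h encoding (illustrative). */
#define MAX_ERRNO 4095
#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static int backing;

/* A constructor in this style encodes failure in the pointer itself. */
static void *make_map(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)&backing;
}

int main(void)
{
	void *map = make_map(1);

	if (!map)                      /* never true: ERR_PTR(-ENOMEM) != NULL */
		return 1;
	if (IS_ERR(map)) {             /* this is the check that catches it */
		fprintf(stderr, "map failed: %ld\n", PTR_ERR(map));
		return 1;
	}
	return 0;
}
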
index b654de0841f87422cdf5f7033a3f9be96d4fc31f..27acdc5e572360bdcc66358a97249ce75cbb3994 100644 (file)
@@ -2127,7 +2127,7 @@ int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct p
                return err;
        }
 
-       err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
+       err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL);
        if (err < 0) {
                pr_err("Couldn't synthesize thread map.\n");
                return err;
index c1ebfc5d2e0cbb4e02a0d5e08d226180003fc0d0..b8b32431d2f7b2bae16032becface59afa17976a 100644 (file)
@@ -95,15 +95,17 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
 
        if (target->cpu_list)
                ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
-                               perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "",
+                               perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1
+                               ? "s" : "",
                                target->cpu_list);
        else {
                if (target->tid)
                        ret += SNPRINTF(bf + ret, size - ret, ")");
                else
                        ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
-                                       perf_cpu_map__nr(top->evlist->core.cpus),
-                                       perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "");
+                                       perf_cpu_map__nr(top->evlist->core.user_requested_cpus),
+                                       perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1
+                                       ? "s" : "");
        }
 
        perf_top__reset_sample_counters(top);
index a74b517f74974dc9d1ecef4c1306915ca6c91850..94aa40f6e3482fc13fe13c70be640a19004f8fdd 100644 (file)
@@ -200,7 +200,8 @@ frame_callback(Dwfl_Frame *state, void *arg)
        bool isactivation;
 
        if (!dwfl_frame_pc(state, &pc, NULL)) {
-               pr_err("%s", dwfl_errmsg(-1));
+               if (!ui->best_effort)
+                       pr_err("%s", dwfl_errmsg(-1));
                return DWARF_CB_ABORT;
        }
 
@@ -208,7 +209,8 @@ frame_callback(Dwfl_Frame *state, void *arg)
        report_module(pc, ui);
 
        if (!dwfl_frame_pc(state, &pc, &isactivation)) {
-               pr_err("%s", dwfl_errmsg(-1));
+               if (!ui->best_effort)
+                       pr_err("%s", dwfl_errmsg(-1));
                return DWARF_CB_ABORT;
        }
 
@@ -222,7 +224,8 @@ frame_callback(Dwfl_Frame *state, void *arg)
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
                        struct thread *thread,
                        struct perf_sample *data,
-                       int max_stack)
+                       int max_stack,
+                       bool best_effort)
 {
        struct unwind_info *ui, ui_buf = {
                .sample         = data,
@@ -231,6 +234,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
                .cb             = cb,
                .arg            = arg,
                .max_stack      = max_stack,
+               .best_effort    = best_effort
        };
        Dwarf_Word ip;
        int err = -EINVAL, i;
index 0cbd2650e280e52635f7e0d9058467d633239c5f..8c88bc4f2304b59561ffd482469ca63ce0f531f0 100644 (file)
@@ -20,6 +20,7 @@ struct unwind_info {
        void                    *arg;
        int                     max_stack;
        int                     idx;
+       bool                    best_effort;
        struct unwind_entry     entries[];
 };
 
index 71a3533491815749bd1ec84e659d659cba09957c..41e29fc7648ae9f79fe258d731b5cd1172c61cd6 100644 (file)
@@ -96,6 +96,7 @@ struct unwind_info {
        struct perf_sample      *sample;
        struct machine          *machine;
        struct thread           *thread;
+       bool                     best_effort;
 };
 
 #define dw_read(ptr, type, end) ({     \
@@ -553,7 +554,8 @@ static int access_reg(unw_addr_space_t __maybe_unused as,
 
        ret = perf_reg_value(&val, &ui->sample->user_regs, id);
        if (ret) {
-               pr_err("unwind: can't read reg %d\n", regnum);
+               if (!ui->best_effort)
+                       pr_err("unwind: can't read reg %d\n", regnum);
                return ret;
        }
 
@@ -666,7 +668,7 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
                        return -1;
 
                ret = unw_init_remote(&c, addr_space, ui);
-               if (ret)
+               if (ret && !ui->best_effort)
                        display_error(ret);
 
                while (!ret && (unw_step(&c) > 0) && i < max_stack) {
@@ -704,12 +706,14 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
 
 static int _unwind__get_entries(unwind_entry_cb_t cb, void *arg,
                        struct thread *thread,
-                       struct perf_sample *data, int max_stack)
+                       struct perf_sample *data, int max_stack,
+                       bool best_effort)
 {
        struct unwind_info ui = {
                .sample       = data,
                .thread       = thread,
                .machine      = thread->maps->machine,
+               .best_effort  = best_effort
        };
 
        if (!data->user_regs.regs)
index e89a5479b361303e8b323182dd5509a18e5ead1b..509c287ee762808061821a9d2c52301e3b6955e4 100644 (file)
@@ -80,9 +80,11 @@ void unwind__finish_access(struct maps *maps)
 
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
                         struct thread *thread,
-                        struct perf_sample *data, int max_stack)
+                        struct perf_sample *data, int max_stack,
+                        bool best_effort)
 {
        if (thread->maps->unwind_libunwind_ops)
-               return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
+               return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data,
+                                                                      max_stack, best_effort);
        return 0;
 }
index ab8ad469c8de5aea6c7bf437bfee156f1b0ed2ed..b2a03fa5289b37c2744aab105415e8febe250f0b 100644 (file)
@@ -23,13 +23,19 @@ struct unwind_libunwind_ops {
        void (*finish_access)(struct maps *maps);
        int (*get_entries)(unwind_entry_cb_t cb, void *arg,
                           struct thread *thread,
-                          struct perf_sample *data, int max_stack);
+                          struct perf_sample *data, int max_stack, bool best_effort);
 };
 
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
+/*
+ * When best_effort is set, don't report errors and fail silently. This could
+ * be expanded in the future to be more permissive about things other than
+ * error messages.
+ */
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
                        struct thread *thread,
-                       struct perf_sample *data, int max_stack);
+                       struct perf_sample *data, int max_stack,
+                       bool best_effort);
 /* libunwind specific */
 #ifdef HAVE_LIBUNWIND_SUPPORT
 #ifndef LIBUNWIND__ARCH_REG_ID
@@ -65,7 +71,8 @@ unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
                    void *arg __maybe_unused,
                    struct thread *thread __maybe_unused,
                    struct perf_sample *data __maybe_unused,
-                   int max_stack __maybe_unused)
+                   int max_stack __maybe_unused,
+                   bool best_effort __maybe_unused)
 {
        return 0;
 }
index d2fba1297d96433c85e8228d46535e4cdb6029f1..846f785e278d7f4c7071ee0ef93c9372796c3b9a 100644 (file)
@@ -47,7 +47,7 @@ $(OUTPUT)intel-speed-select: $(ISST_IN)
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -rf $(OUTPUT)include/linux/isst_if.h
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index a2335e4021455a3ac691e4fcb6ba34cac01b316c..0efb8f2b33cef0b1972ef102e586249510522b1b 100644 (file)
@@ -52,11 +52,17 @@ define allow-override
 endef
 
 ifneq ($(LLVM),)
-$(call allow-override,CC,clang)
-$(call allow-override,AR,llvm-ar)
-$(call allow-override,LD,ld.lld)
-$(call allow-override,CXX,clang++)
-$(call allow-override,STRIP,llvm-strip)
+ifneq ($(filter %/,$(LLVM)),)
+LLVM_PREFIX := $(LLVM)
+else ifneq ($(filter -%,$(LLVM)),)
+LLVM_SUFFIX := $(LLVM)
+endif
+
+$(call allow-override,CC,$(LLVM_PREFIX)clang$(LLVM_SUFFIX))
+$(call allow-override,AR,$(LLVM_PREFIX)llvm-ar$(LLVM_SUFFIX))
+$(call allow-override,LD,$(LLVM_PREFIX)ld.lld$(LLVM_SUFFIX))
+$(call allow-override,CXX,$(LLVM_PREFIX)clang++$(LLVM_SUFFIX))
+$(call allow-override,STRIP,$(LLVM_PREFIX)llvm-strip$(LLVM_SUFFIX))
 else
 # Allow setting various cross-compile vars or setting CROSS_COMPILE as a prefix.
 $(call allow-override,CC,$(CROSS_COMPILE)gcc)
@@ -69,9 +75,9 @@ endif
 CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
 
 ifneq ($(LLVM),)
-HOSTAR  ?= llvm-ar
-HOSTCC  ?= clang
-HOSTLD  ?= ld.lld
+HOSTAR  ?= $(LLVM_PREFIX)llvm-ar$(LLVM_SUFFIX)
+HOSTCC  ?= $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
+HOSTLD  ?= $(LLVM_PREFIX)ld.lld$(LLVM_SUFFIX)
 else
 HOSTAR  ?= ar
 HOSTCC  ?= gcc
index c16ce833079c0a307642f2ae0e75f9c0d577c4d8..172e47273b5d995a7eb3083921bfb74dde5c50a5 100644 (file)
@@ -175,5 +175,5 @@ _ge-abspath = $(if $(is-executable),$(1))
 define get-executable-or-default
 $(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2)))
 endef
-_ge_attempt = $(if $(get-executable),$(get-executable),$(call _gea_err,$(2)))
+_ge_attempt = $(or $(get-executable),$(call _gea_err,$(2)))
 _gea_err  = $(if $(1),$(error Please set '$(1)' appropriately))
index 0aa6dbd31fb8d3c17dd1f4b47b2f3a11ff56bdbc..7fccd245a53515ab019529c8e5e91a044eaaa68b 100644 (file)
@@ -53,9 +53,9 @@ $(OUTPUT)spidev_fdx: $(SPIDEV_FDX_IN)
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -rf $(OUTPUT)include/
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '\.*.o.d' -delete
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '\.*.o.cmd' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete
+       find $(or $(OUTPUT),.) -name '\.*.o.d' -delete
+       find $(or $(OUTPUT),.) -name '\.*.o.cmd' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index 65dbdda3a0544652f7d266e2b2a51a7f6d0f5361..1da76ccde448f894a81fcc58a315a864d28577bc 100644 (file)
@@ -1842,7 +1842,7 @@ static int nfit_test_dimm_init(struct nfit_test *t)
        return 0;
 }
 
-static void security_init(struct nfit_test *t)
+static void nfit_security_init(struct nfit_test *t)
 {
        int i;
 
@@ -1938,7 +1938,7 @@ static int nfit_test0_alloc(struct nfit_test *t)
        if (nfit_test_dimm_init(t))
                return -ENOMEM;
        smart_init(t);
-       security_init(t);
+       nfit_security_init(t);
        return ars_state_init(&t->pdev.dev, &t->ars_state);
 }
 
index 429f7ee735cf4f1554ddd5bb2807aa60cebece41..fd23c80eba315a17d9d1ffdb38e6b0e9c2f70c91 100755 (executable)
@@ -159,6 +159,17 @@ flooding_remotes_add()
        local lsb
        local i
 
+       # Prevent unwanted packets from entering the bridge and interfering
+       # with the test.
+       tc qdisc add dev br0 clsact
+       tc filter add dev br0 egress protocol all pref 1 handle 1 \
+               matchall skip_hw action drop
+       tc qdisc add dev $h1 clsact
+       tc filter add dev $h1 egress protocol all pref 1 handle 1 \
+               flower skip_hw dst_mac de:ad:be:ef:13:37 action pass
+       tc filter add dev $h1 egress protocol all pref 2 handle 2 \
+               matchall skip_hw action drop
+
        for i in $(eval echo {1..$num_remotes}); do
                lsb=$((i + 1))
 
@@ -195,6 +206,12 @@ flooding_filters_del()
        done
 
        tc qdisc del dev $rp2 clsact
+
+       tc filter del dev $h1 egress protocol all pref 2 handle 2 matchall
+       tc filter del dev $h1 egress protocol all pref 1 handle 1 flower
+       tc qdisc del dev $h1 clsact
+       tc filter del dev br0 egress protocol all pref 1 handle 1 matchall
+       tc qdisc del dev br0 clsact
 }
 
 flooding_check_packets()
index fedcb7b35af9f3f2f412ba6a1cadb56b3e35d71d..af5ea50ed5c0ecac41b22d844979f1df59ca106a 100755 (executable)
@@ -172,6 +172,17 @@ flooding_filters_add()
        local lsb
        local i
 
+       # Prevent unwanted packets from entering the bridge and interfering
+       # with the test.
+       tc qdisc add dev br0 clsact
+       tc filter add dev br0 egress protocol all pref 1 handle 1 \
+               matchall skip_hw action drop
+       tc qdisc add dev $h1 clsact
+       tc filter add dev $h1 egress protocol all pref 1 handle 1 \
+               flower skip_hw dst_mac de:ad:be:ef:13:37 action pass
+       tc filter add dev $h1 egress protocol all pref 2 handle 2 \
+               matchall skip_hw action drop
+
        tc qdisc add dev $rp2 clsact
 
        for i in $(eval echo {1..$num_remotes}); do
@@ -194,6 +205,12 @@ flooding_filters_del()
        done
 
        tc qdisc del dev $rp2 clsact
+
+       tc filter del dev $h1 egress protocol all pref 2 handle 2 matchall
+       tc filter del dev $h1 egress protocol all pref 1 handle 1 flower
+       tc qdisc del dev $h1 clsact
+       tc filter del dev br0 egress protocol all pref 1 handle 1 matchall
+       tc qdisc del dev br0 clsact
 }
 
 flooding_check_packets()
index 11779405dc804d34ef8ac767ec5e22b6d2b5c0ba..25f4d54067c0ee243114bdf6733390a761d26b5b 100644 (file)
@@ -64,6 +64,7 @@
 #include <sys/types.h>
 #include <sys/wait.h>
 #include <unistd.h>
+#include <setjmp.h>
 
 #include "kselftest.h"
 
                struct __test_metadata *_metadata, \
                struct __fixture_variant_metadata *variant) \
        { \
-               test_name(_metadata); \
+               _metadata->setup_completed = true; \
+               if (setjmp(_metadata->env) == 0) \
+                       test_name(_metadata); \
+               __test_check_assert(_metadata); \
        } \
        static struct __test_metadata _##test_name##_object = \
                { .name = #test_name, \
 #define FIXTURE_TEARDOWN(fixture_name) \
        void fixture_name##_teardown( \
                struct __test_metadata __attribute__((unused)) *_metadata, \
-               FIXTURE_DATA(fixture_name) __attribute__((unused)) *self)
+               FIXTURE_DATA(fixture_name) __attribute__((unused)) *self, \
+               const FIXTURE_VARIANT(fixture_name) \
+                       __attribute__((unused)) *variant)
 
 /**
  * FIXTURE_VARIANT() - Optionally called once per fixture
  *       ...
  *     };
  *
- * Defines type of constant parameters provided to FIXTURE_SETUP() and TEST_F()
- * as *variant*. Variants allow the same tests to be run with different
- * arguments.
+ * Defines the type of constant parameters provided to FIXTURE_SETUP(), TEST_F(),
+ * and FIXTURE_TEARDOWN() as *variant*. Variants allow the same tests to be run
+ * with different arguments.
  */
 #define FIXTURE_VARIANT(fixture_name) struct _fixture_variant_##fixture_name
 
  * Defines a test that depends on a fixture (e.g., is part of a test case).
  * Very similar to TEST() except that *self* is the setup instance of fixture's
  * datatype exposed for use by the implementation.
- *
- * Warning: use of ASSERT_* here will skip TEARDOWN.
  */
-/* TODO(wad) register fixtures on dedicated test lists. */
 #define TEST_F(fixture_name, test_name) \
        __TEST_F_IMPL(fixture_name, test_name, -1, TEST_TIMEOUT_DEFAULT)
 
                /* fixture data is alloced, setup, and torn down per call. */ \
                FIXTURE_DATA(fixture_name) self; \
                memset(&self, 0, sizeof(FIXTURE_DATA(fixture_name))); \
-               fixture_name##_setup(_metadata, &self, variant->data); \
-               /* Let setup failure terminate early. */ \
-               if (!_metadata->passed) \
-                       return; \
-               fixture_name##_##test_name(_metadata, &self, variant->data); \
-               fixture_name##_teardown(_metadata, &self); \
+               if (setjmp(_metadata->env) == 0) { \
+                       fixture_name##_setup(_metadata, &self, variant->data); \
+                       /* Let setup failure terminate early. */ \
+                       if (!_metadata->passed) \
+                               return; \
+                       _metadata->setup_completed = true; \
+                       fixture_name##_##test_name(_metadata, &self, variant->data); \
+               } \
+               if (_metadata->setup_completed) \
+                       fixture_name##_teardown(_metadata, &self, variant->data); \
+               __test_check_assert(_metadata); \
        } \
        static struct __test_metadata \
                      _##fixture_name##_##test_name##_object = { \
  */
 #define OPTIONAL_HANDLER(_assert) \
        for (; _metadata->trigger; _metadata->trigger = \
-                       __bail(_assert, _metadata->no_print, _metadata->step))
+                       __bail(_assert, _metadata))
 
 #define __INC_STEP(_metadata) \
        /* Keep "step" below 255 (which is used for "SKIP" reporting). */       \
@@ -830,6 +838,9 @@ struct __test_metadata {
        bool timed_out; /* did this test timeout instead of exiting? */
        __u8 step;
        bool no_print; /* manual trigger when TH_LOG_STREAM is not available */
+       bool aborted;   /* stopped test due to failed ASSERT */
+       bool setup_completed; /* did setup finish? */
+       jmp_buf env;    /* for exiting out of test early */
        struct __test_results *results;
        struct __test_metadata *prev, *next;
 };
@@ -848,16 +859,26 @@ static inline void __register_test(struct __test_metadata *t)
        __LIST_APPEND(t->fixture->tests, t);
 }
 
-static inline int __bail(int for_realz, bool no_print, __u8 step)
+static inline int __bail(int for_realz, struct __test_metadata *t)
 {
+       /* if this is ASSERT, longjmp out of the test immediately. */
        if (for_realz) {
-               if (no_print)
-                       _exit(step);
-               abort();
+               t->aborted = true;
+               longjmp(t->env, 1);
        }
+       /* otherwise, end the for loop and continue. */
        return 0;
 }
 
+static inline void __test_check_assert(struct __test_metadata *t)
+{
+       if (t->aborted) {
+               if (t->no_print)
+                       _exit(t->step);
+               abort();
+       }
+}
+
 struct __test_metadata *__active_test;
 static void __timeout_handler(int sig, siginfo_t *info, void *ucontext)
 {
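
The harness changes above replace the old abort-on-ASSERT behaviour: a failed ASSERT now longjmp()s back to the runner so fixture teardown still executes, and __test_check_assert() reports the abort afterwards. A minimal standalone sketch of that control flow, with illustrative names:

#include <setjmp.h>
#include <stdbool.h>
#include <stdio.h>

static jmp_buf env;
static bool aborted;

static void demo_assert(bool cond)
{
	if (!cond) {
		aborted = true;
		longjmp(env, 1);	/* abandon the rest of the test body */
	}
}

static void test_body(void)
{
	demo_assert(1 + 1 == 3);	/* fails and jumps back to the runner */
	puts("never reached");
}

int main(void)
{
	if (setjmp(env) == 0)
		test_body();
	puts("teardown still runs here");	/* cleanup happens either way */
	return aborted ? 1 : 0;			/* report the failure afterwards */
}
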
index d1e8f523746976fac6b217ef0ef1a0ef61c7bd51..0b0e4402bba6ae13e66cc812b64f7a128a1ca4ab 100644 (file)
@@ -3,6 +3,7 @@
 /aarch64/debug-exceptions
 /aarch64/get-reg-list
 /aarch64/psci_cpu_on_test
+/aarch64/vcpu_width_config
 /aarch64/vgic_init
 /aarch64/vgic_irq
 /s390x/memop
@@ -33,6 +34,7 @@
 /x86_64/state_test
 /x86_64/svm_vmcall_test
 /x86_64/svm_int_ctl_test
+/x86_64/tsc_scaling_sync
 /x86_64/sync_regs_test
 /x86_64/tsc_msrs_test
 /x86_64/userspace_io_test
index 21c2dbd21a81cfff52576f32049c717c7ac63da6..681b173aa87c17e5d467071f59d23764b2023759 100644 (file)
@@ -106,6 +106,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
 TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
 TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
 TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test
+TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
 TEST_GEN_PROGS_aarch64 += demand_paging_test
index b08d30bf71c513f443d665d5e30f368842d8d275..3b940a101bc0790cd27c5f18edd152cb5ff2b481 100644 (file)
@@ -362,11 +362,12 @@ static void test_init_timer_irq(struct kvm_vm *vm)
        pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
 }
 
+static int gic_fd;
+
 static struct kvm_vm *test_vm_create(void)
 {
        struct kvm_vm *vm;
        unsigned int i;
-       int ret;
        int nr_vcpus = test_args.nr_vcpus;
 
        vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL);
@@ -383,8 +384,8 @@ static struct kvm_vm *test_vm_create(void)
 
        ucall_init(vm, NULL);
        test_init_timer_irq(vm);
-       ret = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
-       if (ret < 0) {
+       gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
+       if (gic_fd < 0) {
                print_skip("Failed to create vgic-v3");
                exit(KSFT_SKIP);
        }
@@ -395,6 +396,12 @@ static struct kvm_vm *test_vm_create(void)
        return vm;
 }
 
+static void test_vm_cleanup(struct kvm_vm *vm)
+{
+       close(gic_fd);
+       kvm_vm_free(vm);
+}
+
 static void test_print_help(char *name)
 {
        pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n",
@@ -478,7 +485,7 @@ int main(int argc, char *argv[])
 
        vm = test_vm_create();
        test_run(vm);
-       kvm_vm_free(vm);
+       test_vm_cleanup(vm);
 
        return 0;
 }
index f12147c43464e5c2a9cd7561e46c4459d48dd17d..0b571f3fe64ce8a36880e4a7460009efa1e4703f 100644 (file)
@@ -503,8 +503,13 @@ static void run_test(struct vcpu_config *c)
                ++missing_regs;
 
        if (new_regs || missing_regs) {
+               n = 0;
+               for_each_reg_filtered(i)
+                       ++n;
+
                printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n);
-               printf("%s: Number registers:         %5lld\n", config_name(c), reg_list->n);
+               printf("%s: Number registers:         %5lld (includes %lld filtered registers)\n",
+                      config_name(c), reg_list->n, reg_list->n - n);
        }
 
        if (new_regs) {
@@ -683,9 +688,10 @@ static __u64 base_regs[] = {
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr),
        KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr),
-       KVM_REG_ARM_FW_REG(0),
-       KVM_REG_ARM_FW_REG(1),
-       KVM_REG_ARM_FW_REG(2),
+       KVM_REG_ARM_FW_REG(0),          /* KVM_REG_ARM_PSCI_VERSION */
+       KVM_REG_ARM_FW_REG(1),          /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 */
+       KVM_REG_ARM_FW_REG(2),          /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 */
+       KVM_REG_ARM_FW_REG(3),          /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 */
        ARM64_SYS_REG(3, 3, 14, 3, 1),  /* CNTV_CTL_EL0 */
        ARM64_SYS_REG(3, 3, 14, 3, 2),  /* CNTV_CVAL_EL0 */
        ARM64_SYS_REG(3, 3, 14, 0, 2),
diff --git a/tools/testing/selftests/kvm/aarch64/vcpu_width_config.c b/tools/testing/selftests/kvm/aarch64/vcpu_width_config.c
new file mode 100644 (file)
index 0000000..6e94026
--- /dev/null
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vcpu_width_config - Test KVM_ARM_VCPU_INIT() with KVM_ARM_VCPU_EL1_32BIT.
+ *
+ * Copyright (c) 2022 Google LLC.
+ *
+ * This is a test that ensures that non-mixed-width vCPUs (all 64bit vCPUs
+ * or all 32bit vCPUs) can be configured and mixed-width vCPUs cannot be
+ * configured.
+ */
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "test_util.h"
+
+
+/*
+ * Add a vCPU, run KVM_ARM_VCPU_INIT with @init1, then add another
+ * vCPU and run KVM_ARM_VCPU_INIT with @init2.
+ */
+static int add_init_2vcpus(struct kvm_vcpu_init *init1,
+                          struct kvm_vcpu_init *init2)
+{
+       struct kvm_vm *vm;
+       int ret;
+
+       vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+
+       vm_vcpu_add(vm, 0);
+       ret = _vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
+       if (ret)
+               goto free_exit;
+
+       vm_vcpu_add(vm, 1);
+       ret = _vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
+
+free_exit:
+       kvm_vm_free(vm);
+       return ret;
+}
+
+/*
+ * Add two vCPUs, then run KVM_ARM_VCPU_INIT for one vCPU with @init1,
+ * and run KVM_ARM_VCPU_INIT for the other vCPU with @init2.
+ */
+static int add_2vcpus_init_2vcpus(struct kvm_vcpu_init *init1,
+                                 struct kvm_vcpu_init *init2)
+{
+       struct kvm_vm *vm;
+       int ret;
+
+       vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+
+       vm_vcpu_add(vm, 0);
+       vm_vcpu_add(vm, 1);
+
+       ret = _vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
+       if (ret)
+               goto free_exit;
+
+       ret = _vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
+
+free_exit:
+       kvm_vm_free(vm);
+       return ret;
+}
+
+/*
+ * Tests that two 64bit vCPUs can be configured, two 32bit vCPUs can be
+ * configured, and two mixed-width vCPUs cannot be configured.
+ * For each of those three cases, the vCPUs are configured in two different
+ * orders. One order runs KVM_CREATE_VCPU for both vCPUs and then runs
+ * KVM_ARM_VCPU_INIT for them.
+ * The other runs KVM_CREATE_VCPU and KVM_ARM_VCPU_INIT for one vCPU,
+ * and then runs those commands for the other vCPU.
+ */
+int main(void)
+{
+       struct kvm_vcpu_init init1, init2;
+       struct kvm_vm *vm;
+       int ret;
+
+       if (!kvm_check_cap(KVM_CAP_ARM_EL1_32BIT)) {
+               print_skip("KVM_CAP_ARM_EL1_32BIT is not supported");
+               exit(KSFT_SKIP);
+       }
+
+       /* Get the preferred target type and copy that to init2 for later use */
+       vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+       vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init1);
+       kvm_vm_free(vm);
+       init2 = init1;
+
+       /* Test with 64bit vCPUs */
+       ret = add_init_2vcpus(&init1, &init1);
+       TEST_ASSERT(ret == 0,
+                   "Configuring 64bit EL1 vCPUs failed unexpectedly");
+       ret = add_2vcpus_init_2vcpus(&init1, &init1);
+       TEST_ASSERT(ret == 0,
+                   "Configuring 64bit EL1 vCPUs failed unexpectedly");
+
+       /* Test with 32bit vCPUs */
+       init1.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
+       ret = add_init_2vcpus(&init1, &init1);
+       TEST_ASSERT(ret == 0,
+                   "Configuring 32bit EL1 vCPUs failed unexpectedly");
+       ret = add_2vcpus_init_2vcpus(&init1, &init1);
+       TEST_ASSERT(ret == 0,
+                   "Configuring 32bit EL1 vCPUs failed unexpectedly");
+
+       /* Test with mixed-width vCPUs */
+       init1.features[0] = 0;
+       init2.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
+       ret = add_init_2vcpus(&init1, &init2);
+       TEST_ASSERT(ret != 0,
+                   "Configuring mixed-width vCPUs worked unexpectedly");
+       ret = add_2vcpus_init_2vcpus(&init1, &init2);
+       TEST_ASSERT(ret != 0,
+                   "Configuring mixed-width vCPUs worked unexpectedly");
+
+       return 0;
+}
index c9d9e513ca04457a80f14c6ad2d039dbe677ae12..7b47ae4f952e68a5003092321814c3265d3bc08f 100644 (file)
 #include "test_util.h"
 #include "perf_test_util.h"
 #include "guest_modes.h"
+
 #ifdef __aarch64__
 #include "aarch64/vgic.h"
 
 #define GICD_BASE_GPA                  0x8000000ULL
 #define GICR_BASE_GPA                  0x80A0000ULL
+
+static int gic_fd;
+
+static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
+{
+       /*
+        * The test can still run even if hardware does not support GICv3, as it
+        * is only an optimization to reduce guest exits.
+        */
+       gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
+}
+
+static void arch_cleanup_vm(struct kvm_vm *vm)
+{
+       if (gic_fd > 0)
+               close(gic_fd);
+}
+
+#else /* __aarch64__ */
+
+static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
+{
+}
+
+static void arch_cleanup_vm(struct kvm_vm *vm)
+{
+}
+
 #endif
 
 /* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
@@ -206,9 +235,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                vm_enable_cap(vm, &cap);
        }
 
-#ifdef __aarch64__
-       vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
-#endif
+       arch_setup_vm(vm, nr_vcpus);
 
        /* Start the iterations */
        iteration = 0;
@@ -302,6 +329,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        }
 
        free_bitmaps(bitmaps, p->slots);
+       arch_cleanup_vm(vm);
        perf_test_destroy_vm(vm);
 }
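
The hunks above hide the aarch64-only vGIC setup behind arch_setup_vm()/arch_cleanup_vm(), with empty stubs on other architectures so the generic test body stays free of #ifdefs. A small sketch of the same stub pattern (names are illustrative):

#include <stdio.h>

#ifdef __aarch64__
static void arch_setup(void)
{
	puts("arm64-only setup");	/* e.g. create the vGIC */
}
#else
static void arch_setup(void)
{
	/* nothing to do on other architectures */
}
#endif

int main(void)
{
	arch_setup();	/* generic code calls the hook unconditionally */
	puts("generic test body");
	return 0;
}
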
 
index dc284c6bdbc3714e47d99cda508f5f1f820fc37d..eca5c622efd25b09732523b2b2962e488fb4ad3d 100644 (file)
@@ -101,7 +101,9 @@ static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id,
 #define PGTBL_PTE_WRITE_SHIFT                  2
 #define PGTBL_PTE_READ_MASK                    0x0000000000000002ULL
 #define PGTBL_PTE_READ_SHIFT                   1
-#define PGTBL_PTE_PERM_MASK                    (PGTBL_PTE_EXECUTE_MASK | \
+#define PGTBL_PTE_PERM_MASK                    (PGTBL_PTE_ACCESSED_MASK | \
+                                                PGTBL_PTE_DIRTY_MASK | \
+                                                PGTBL_PTE_EXECUTE_MASK | \
                                                 PGTBL_PTE_WRITE_MASK | \
                                                 PGTBL_PTE_READ_MASK)
 #define PGTBL_PTE_VALID_MASK                   0x0000000000000001ULL
index d377f2603d98a0b7be7fc1304a478dbcd8209164..3961487a4870db7b3f85e41e7dd44f681ad4d08e 100644 (file)
@@ -268,7 +268,7 @@ void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
                core.regs.t3, core.regs.t4, core.regs.t5, core.regs.t6);
 }
 
-static void guest_hang(void)
+static void __aligned(16) guest_hang(void)
 {
        while (1)
                ;
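
__aligned(16) raises the alignment of guest_hang(), whose address the RISC-V guest code installs as a trap vector, and trap vector bases must be suitably aligned. A sketch of the compiler attribute that the kernel's __aligned() macro wraps; the demo names are illustrative:

#include <stdio.h>

#define __aligned(x) __attribute__((aligned(x)))

static void __aligned(16) hang(void)
{
	for (;;)
		;	/* park here forever, as guest_hang() does */
}

int main(void)
{
	/* the low four bits of the address are zero thanks to the attribute */
	printf("hang() at %p\n", (void *)hang);
	return 0;
}
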
index a40add31a2e3ff7272942b6e8a542863b4aba461..2a2d240cdc1b4624ccc2608a578a9c7a34116c1b 100644 (file)
@@ -1,7 +1,13 @@
 # This mimics the top-level Makefile. We do it explicitly here so that this
 # Makefile can operate with or without the kbuild infrastructure.
 ifneq ($(LLVM),)
-CC := clang
+ifneq ($(filter %/,$(LLVM)),)
+LLVM_PREFIX := $(LLVM)
+else ifneq ($(filter -%,$(LLVM)),)
+LLVM_SUFFIX := $(LLVM)
+endif
+
+CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
 else
 CC := $(CROSS_COMPILE)gcc
 endif
index b019e0b8221c7c0bf565d163e141d9e69d37014f..84fda3b490735faa7d3daeb05e58deca9f865f5f 100644 (file)
@@ -180,6 +180,9 @@ void shutdown(int exit_val, char *err_cause, int line_no)
        if (in_shutdown++)
                return;
 
+       /* Free the cpu_set allocated using CPU_ALLOC in main() */
+       CPU_FREE(cpu_set);
+
        for (i = 0; i < num_cpus_to_pin; i++)
                if (cpu_threads[i]) {
                        pthread_kill(cpu_threads[i], SIGUSR1);
@@ -551,6 +554,12 @@ int main(int argc, char *argv[])
                perror("sysconf(_SC_NPROCESSORS_ONLN)");
                exit(1);
        }
+
+       if (getuid() != 0)
+               ksft_exit_skip("Not running as root, but almost all tests "
+                       "require root in order to modify\nsystem settings.  "
+                       "Exiting.\n");
+
        cpus_online = min(MAX_CPUS, sysconf(_SC_NPROCESSORS_ONLN));
        cpu_set = CPU_ALLOC(cpus_online);
        if (cpu_set == NULL) {
@@ -589,7 +598,7 @@ int main(int argc, char *argv[])
                                                cpu_set)) {
                                        fprintf(stderr, "Any given CPU may "
                                                "only be given once.\n");
-                                       exit(1);
+                                       goto err_code;
                                } else
                                        CPU_SET_S(cpus_to_pin[cpu],
                                                  cpu_set_size, cpu_set);
@@ -607,7 +616,7 @@ int main(int argc, char *argv[])
                                queue_path = malloc(strlen(option) + 2);
                                if (!queue_path) {
                                        perror("malloc()");
-                                       exit(1);
+                                       goto err_code;
                                }
                                queue_path[0] = '/';
                                queue_path[1] = 0;
@@ -622,17 +631,12 @@ int main(int argc, char *argv[])
                fprintf(stderr, "Must pass at least one CPU to continuous "
                        "mode.\n");
                poptPrintUsage(popt_context, stderr, 0);
-               exit(1);
+               goto err_code;
        } else if (!continuous_mode) {
                num_cpus_to_pin = 1;
                cpus_to_pin[0] = cpus_online - 1;
        }
 
-       if (getuid() != 0)
-               ksft_exit_skip("Not running as root, but almost all tests "
-                       "require root in order to modify\nsystem settings.  "
-                       "Exiting.\n");
-
        max_msgs = fopen(MAX_MSGS, "r+");
        max_msgsize = fopen(MAX_MSGSIZE, "r+");
        if (!max_msgs)
@@ -740,4 +744,9 @@ int main(int argc, char *argv[])
                        sleep(1);
        }
        shutdown(0, "", 0);
+
+err_code:
+       CPU_FREE(cpu_set);
+       exit(1);
+
 }
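
The fix above converts the early exit(1) calls into goto err_code so the CPU_ALLOC()'d mask is freed on every exit path. A minimal sketch of the allocate/single-error-label/free pattern using the same glibc CPU_* macros:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	int ncpus = 4;	/* stand-in for sysconf(_SC_NPROCESSORS_ONLN) */
	cpu_set_t *set = CPU_ALLOC(ncpus);
	size_t size = CPU_ALLOC_SIZE(ncpus);

	if (!set)
		return 1;

	CPU_ZERO_S(size, set);
	CPU_SET_S(0, size, set);
	if (!CPU_ISSET_S(0, size, set))
		goto err;	/* every failure path funnels through one label */

	printf("cpu 0 pinned in a mask sized for %d cpus\n", ncpus);
	CPU_FREE(set);
	return 0;

err:
	CPU_FREE(set);	/* the mask is freed on the error path too */
	return 1;
}
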
index dcaefa224ca01f724d0f5549507a75cf0d55bcaf..edafaca1aeb39a8ac19aeca05040d1c6ebae3c95 100644 (file)
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 CFLAGS += -g -I../../../../usr/include/
 
-TEST_GEN_PROGS := regression_enomem
+TEST_GEN_PROGS = regression_enomem
 
-include ../lib.mk
+LOCAL_HDRS += $(selfdir)/pidfd/pidfd.h
 
-$(OUTPUT)/regression_enomem: regression_enomem.c ../pidfd/pidfd.h
+include ../lib.mk
index 17999e082aa715525f013f194b278e1bdf6786ad..070c1c876df15146d59ebab18353e62ad5c87362 100644 (file)
@@ -95,7 +95,6 @@ TEST(wait_states)
                .flags = CLONE_PIDFD | CLONE_PARENT_SETTID,
                .exit_signal = SIGCHLD,
        };
-       int ret;
        pid_t pid;
        siginfo_t info = {
                .si_signo = 0,
index 18a3bde8bc961eaf36c222cee19f00973c011640..28604c9f805c757bd108b1ef0b269e907539d5b1 100644 (file)
@@ -46,6 +46,8 @@
 #include <sys/time.h>
 #include <sys/resource.h>
 
+#include "../kselftest.h"
+
 static inline long sys_execveat(int dirfd, const char *pathname, char **argv, char **envp, int flags)
 {
        return syscall(SYS_execveat, dirfd, pathname, argv, envp, flags);
@@ -368,7 +370,7 @@ int main(void)
                };
                int i;
 
-               for (i = 0; i < sizeof(S)/sizeof(S[0]); i++) {
+               for (i = 0; i < ARRAY_SIZE(S); i++) {
                        assert(memmem(buf, rv, S[i], strlen(S[i])));
                }
 
@@ -417,7 +419,7 @@ int main(void)
                };
                int i;
 
-               for (i = 0; i < sizeof(S)/sizeof(S[0]); i++) {
+               for (i = 0; i < ARRAY_SIZE(S); i++) {
                        assert(memmem(buf, rv, S[i], strlen(S[i])));
                }
        }
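
Both loops above drop the open-coded sizeof(S)/sizeof(S[0]) in favour of the ARRAY_SIZE() helper that kselftest.h provides. A small sketch of the idiom:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	const char *S[] = { "Name:", "State:", "Pid:" };

	/* ARRAY_SIZE() only works on true arrays, not on pointers */
	for (unsigned int i = 0; i < ARRAY_SIZE(S); i++)
		puts(S[i]);
	return 0;
}
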
index c4aea794725a7e502b1334efdfdc512446b40c52..e691a3cf14911237190c048f658830078dc90c3b 100644 (file)
@@ -20,6 +20,7 @@
 #include <limits.h>
 
 #include "vdso_config.h"
+#include "../kselftest.h"
 
 static const char **name;
 
@@ -306,10 +307,8 @@ static void test_clock_gettime(void)
                return;
        }
 
-       for (int clock = 0; clock < sizeof(clocknames) / sizeof(clocknames[0]);
-            clock++) {
+       for (int clock = 0; clock < ARRAY_SIZE(clocknames); clock++)
                test_one_clock_gettime(clock, clocknames[clock]);
-       }
 
        /* Also test some invalid clock ids */
        test_one_clock_gettime(-1, "invalid");
@@ -370,10 +369,8 @@ static void test_clock_gettime64(void)
                return;
        }
 
-       for (int clock = 0; clock < sizeof(clocknames) / sizeof(clocknames[0]);
-            clock++) {
+       for (int clock = 0; clock < ARRAY_SIZE(clocknames); clock++)
                test_one_clock_gettime64(clock, clocknames[clock]);
-       }
 
        /* Also test some invalid clock ids */
        test_one_clock_gettime64(-1, "invalid");
index a85025d7206eec0b349d4aef5373dcda03dc0f8c..a9b4fe7950480fe842b0a819df5e429dc259d48a 100644 (file)
@@ -1,3 +1,4 @@
+CONFIG_ACPI=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_CMDLINE_BOOL=y
index 00a1ef4869d58ae86774cf5ba6c51363d903fb95..45dd53a0d76005fec85c7b5513fbcc902ed3a006 100644 (file)
@@ -1,3 +1,4 @@
+CONFIG_ACPI=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_CMDLINE_BOOL=y
index 53df7d3893d31cfc3c60ebd2d612dc2546cdbc07..0388c4d60af0e34f192b8869e21cf76cb61aa8a5 100644 (file)
@@ -92,6 +92,10 @@ warn_32bit_failure:
        echo "If you are using a Fedora-like distribution, try:";       \
        echo "";                                                        \
        echo "  yum install glibc-devel.*i686";                         \
+       echo "";                                                        \
+       echo "If you are using a SUSE-like distribution, try:";         \
+       echo "";                                                        \
+       echo "  zypper install gcc-32bit glibc-devel-static-32bit";     \
        exit 0;
 endif
 
index 3615ef4a48bbb9d6857c68efca95d250426fb0bb..2189f0322d8bf34eca506fc4a33af52875da2e02 100644 (file)
@@ -368,9 +368,16 @@ static void req_xtiledata_perm(void)
 
 static void validate_req_xcomp_perm(enum expected_result exp)
 {
-       unsigned long bitmask;
+       unsigned long bitmask, expected_bitmask;
        long rc;
 
+       rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_PERM, &bitmask);
+       if (rc) {
+               fatal_error("prctl(ARCH_GET_XCOMP_PERM) error: %ld", rc);
+       } else if (!(bitmask & XFEATURE_MASK_XTILECFG)) {
+               fatal_error("ARCH_GET_XCOMP_PERM returns XFEATURE_XTILECFG off.");
+       }
+
        rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA);
        if (exp == FAIL_EXPECTED) {
                if (rc) {
@@ -383,10 +390,15 @@ static void validate_req_xcomp_perm(enum expected_result exp)
                fatal_error("ARCH_REQ_XCOMP_PERM saw unexpected failure.\n");
        }
 
+       expected_bitmask = bitmask | XFEATURE_MASK_XTILEDATA;
+
        rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_PERM, &bitmask);
        if (rc) {
                fatal_error("prctl(ARCH_GET_XCOMP_PERM) error: %ld", rc);
-       } else if (bitmask & XFEATURE_MASK_XTILE) {
+       } else if (bitmask != expected_bitmask) {
+               fatal_error("ARCH_REQ_XCOMP_PERM set a wrong bitmask: %lx, expected: %lx.\n",
+                           bitmask, expected_bitmask);
+       } else {
                printf("\tARCH_REQ_XCOMP_PERM is successful.\n");
        }
 }
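
The added check reads the permission bitmap before and after ARCH_REQ_XCOMP_PERM so the test can verify that exactly the tile-data feature was granted. A hedged standalone sketch of the same query; the prctl numbers and bit position below are assumptions taken from the x86 uapi headers (0x1022/0x1023, XTILEDATA = bit 18) and should be checked against current headers:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* assumed values from arch/x86 uapi; verify against your headers */
#define ARCH_GET_XCOMP_PERM	0x1022
#define ARCH_REQ_XCOMP_PERM	0x1023
#define XFEATURE_XTILEDATA	18

int main(void)
{
	unsigned long before = 0, after = 0;

	if (syscall(SYS_arch_prctl, ARCH_GET_XCOMP_PERM, &before))
		return 1;
	if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA))
		return 1;	/* fails on hardware without AMX */
	if (syscall(SYS_arch_prctl, ARCH_GET_XCOMP_PERM, &after))
		return 1;

	/* exactly the tile-data bit should have been added */
	printf("ok: %d\n", after == (before | (1UL << XFEATURE_XTILEDATA)));
	return 0;
}
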
index 5a1eda61799246aadbe604c5a742529e97dfc999..11fb417abb42f51f289456043f8db1ed60dc159b 100644 (file)
@@ -46,7 +46,7 @@ DATADIR       :=      /usr/share
 DOCDIR :=      $(DATADIR)/doc
 MANDIR :=      $(DATADIR)/man
 LICDIR :=      $(DATADIR)/licenses
-SRCTREE        :=      $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR))
+SRCTREE        :=      $(or $(BUILD_SRC),$(CURDIR))
 
 # If running from the tarball, man pages are stored in the Documentation
 # dir. If running from the kernel source, man pages are stored in
index 1b128e551b2e428d0cb5cbb3c862be0e9d7d2963..c6235667dd46223b646bbab5c82c810221750182 100644 (file)
@@ -38,7 +38,7 @@ $(OUTPUT)ffs-test: $(FFS_TEST_IN)
 
 clean:
        rm -f $(ALL_PROGRAMS)
-       find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.o.cmd' -delete
+       find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.o.cmd' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
index 0d7bbe49359d834cd9eb211128c39fe7bb06b47a..1b25cc7c64bbd389e2e248d665cdcdd724a0c3f8 100644 (file)
@@ -5,7 +5,8 @@ virtio_test: virtio_ring.o virtio_test.o
 vringh_test: vringh_test.o vringh.o virtio_ring.o
 
 CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h
-LDFLAGS += -lpthread
+CFLAGS += -pthread
+LDFLAGS += -pthread
 vpath %.c ../../drivers/virtio ../../drivers/vhost
 mod:
        ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V}
index 8f41cd6bd5c088d9c9d0828331a139cf2cc94316..834a90bd3270e59d6456e3c51f40fcae35e4c4bc 100644 (file)
@@ -26,8 +26,8 @@ enum dma_data_direction {
 #define dma_map_single(d, p, s, dir) (virt_to_phys(p))
 #define dma_mapping_error(...) (0)
 
-#define dma_unmap_single(...) do { } while (0)
-#define dma_unmap_page(...) do { } while (0)
+#define dma_unmap_single(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
+#define dma_unmap_page(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
 
 #define dma_max_mapping_size(...) SIZE_MAX
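
Spelling out the stub's parameters and casting each to void keeps these no-op macros from provoking set-but-unused warnings in callers while still expanding to nothing. A small sketch of the idiom:

#include <stdio.h>

/* no-op stub that still "uses" its arguments, silencing -Wunused warnings */
#define release_buf(dev, addr, size) \
	do { (void)(dev); (void)(addr); (void)(size); } while (0)

int main(void)
{
	int dev = 0;
	void *addr = NULL;	/* would otherwise be flagged as set-but-unused */
	size_t size = 0;

	release_buf(dev, addr, size);
	puts("stub expanded to nothing, arguments marked as used");
	return 0;
}
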
 
index 7679335fce5be5a73baad608d3c9ef081cbd94d6..7d98e76c22910b811f6c4a75ab0988035d7fb213 100644 (file)
@@ -441,7 +441,6 @@ static void usage(void)
                "-n\t\tSort by task command name.\n"
                "-a\t\tSort by memory allocate time.\n"
                "-r\t\tSort by memory release time.\n"
-               "-c\t\tCull by comparing stacktrace instead of total block.\n"
                "-f\t\tFilter out the information of blocks whose memory has been released.\n"
                "--pid <PID>\tSelect by pid. This selects the information of blocks whose process ID number equals to <PID>.\n"
                "--tgid <TGID>\tSelect by tgid. This selects the information of blocks whose Thread Group ID number equals to <TGID>.\n"
@@ -466,14 +465,11 @@ int main(int argc, char **argv)
                { 0, 0, 0, 0},
        };
 
-       while ((opt = getopt_long(argc, argv, "acfmnprstP", longopts, NULL)) != -1)
+       while ((opt = getopt_long(argc, argv, "afmnprstP", longopts, NULL)) != -1)
                switch (opt) {
                case 'a':
                        cmp = compare_ts;
                        break;
-               case 'c':
-                       cull = cull | CULL_STACKTRACE;
-                       break;
                case 'f':
                        filter = filter | FILTER_UNRELEASE;
                        break;
index cc0d2824e1006626c1d1bbcc4efc47af6a135e1d..59d9e8b07a017ce2fe26833d32d941e20d59d954 100644 (file)
@@ -3,7 +3,7 @@
 # kbuild file for usr/ - including initramfs image
 #
 
-compress-y                                     := shipped
+compress-y                                     := copy
 compress-$(CONFIG_INITRAMFS_COMPRESSION_GZIP)  := gzip
 compress-$(CONFIG_INITRAMFS_COMPRESSION_BZIP2) := bzip2
 compress-$(CONFIG_INITRAMFS_COMPRESSION_LZMA)  := lzma
@@ -37,7 +37,7 @@ endif
 # .cpio.*, use it directly as an initramfs, and avoid double compression.
 ifeq ($(words $(subst .cpio.,$(space),$(ramfs-input))),2)
 cpio-data := $(ramfs-input)
-compress-y := shipped
+compress-y := copy
 endif
 
 endif
index 7b283d43f00d3560ad53e0886f0d0884248c546f..fa9819e022b7178052c796715afe4abad98bfcb7 100644 (file)
@@ -10,7 +10,10 @@ UAPI_CFLAGS := -std=c90 -Wall -Werror=implicit-function-declaration
 
 # In theory, we do not care -m32 or -m64 for header compile tests.
 # It is here just because CONFIG_CC_CAN_LINK is tested with -m32 or -m64.
-UAPI_CFLAGS += $(filter -m32 -m64, $(KBUILD_CFLAGS))
+UAPI_CFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
+
+# USERCFLAGS might contain sysroot location for CC.
+UAPI_CFLAGS += $(USERCFLAGS)
 
 override c_flags = $(UAPI_CFLAGS) -Wp,-MMD,$(depfile) -I$(objtree)/usr/include
 
@@ -84,7 +87,7 @@ endif
 # asm-generic/*.h is used by asm/*.h, and should not be included directly
 no-header-test += asm-generic/%
 
-extra-y := $(patsubst $(obj)/%.h,%.hdrtest, $(shell find $(obj) -name '*.h' 2>/dev/null))
+always-y := $(patsubst $(obj)/%.h,%.hdrtest, $(shell find $(obj) -name '*.h' 2>/dev/null))
 
 # Include the header twice to detect missing include guard.
 quiet_cmd_hdrtest = HDRTEST $<
index 69c318fdff616d43799a30dc7f7f999144767df2..dfb7dabdbc63dee5435a7041d53e45e261792afa 100644 (file)
@@ -117,6 +117,8 @@ EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
 
 static const struct file_operations stat_fops_per_vm;
 
+static struct file_operations kvm_chardev_ops;
+
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);
 #ifdef CONFIG_KVM_COMPAT
@@ -251,7 +253,8 @@ static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
 {
        int cpu;
 
-       kvm_make_request(req, vcpu);
+       if (likely(!(req & KVM_REQUEST_NO_ACTION)))
+               __kvm_make_request(req, vcpu);
 
        if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
                return;
@@ -431,8 +434,8 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 
 static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-       kvm_dirty_ring_free(&vcpu->dirty_ring);
        kvm_arch_vcpu_destroy(vcpu);
+       kvm_dirty_ring_free(&vcpu->dirty_ring);
 
        /*
         * No need for rcu_read_lock as VCPU_RUN is the only place that changes
@@ -929,7 +932,7 @@ static void kvm_destroy_vm_debugfs(struct kvm *kvm)
        int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
                                      kvm_vcpu_stats_header.num_desc;
 
-       if (!kvm->debugfs_dentry)
+       if (IS_ERR(kvm->debugfs_dentry))
                return;
 
        debugfs_remove_recursive(kvm->debugfs_dentry);
@@ -952,6 +955,12 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
        int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
                                      kvm_vcpu_stats_header.num_desc;
 
+       /*
+        * Force subsequent debugfs file creations to fail if the VM directory
+        * is not created.
+        */
+       kvm->debugfs_dentry = ERR_PTR(-ENOENT);
+
        if (!debugfs_initialized())
                return 0;
 
@@ -1131,6 +1140,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
        preempt_notifier_inc();
        kvm_init_pm_notifier(kvm);
 
+       /*
+        * When the fd passed to this ioctl() is opened it pins the module,
+        * but try_module_get() also prevents getting a reference if the module
+        * is in MODULE_STATE_GOING (e.g. if someone ran "rmmod --wait").
+        */
+       if (!try_module_get(kvm_chardev_ops.owner)) {
+               r = -ENODEV;
+               goto out_err;
+       }
+
        return kvm;
 
 out_err:
@@ -1220,6 +1239,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
        preempt_notifier_dec();
        hardware_disable_all();
        mmdrop(mm);
+       module_put(kvm_chardev_ops.owner);
 }
 
 void kvm_get_kvm(struct kvm *kvm)
@@ -3663,7 +3683,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
        return 0;
 }
 
-static struct file_operations kvm_vcpu_fops = {
+static const struct file_operations kvm_vcpu_fops = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
        .mmap           = kvm_vcpu_mmap,
@@ -4714,7 +4734,7 @@ static long kvm_vm_compat_ioctl(struct file *filp,
 }
 #endif
 
-static struct file_operations kvm_vm_fops = {
+static const struct file_operations kvm_vm_fops = {
        .release        = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
        .llseek         = noop_llseek,
@@ -5465,7 +5485,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
        }
        add_uevent_var(env, "PID=%d", kvm->userspace_pid);
 
-       if (kvm->debugfs_dentry) {
+       if (!IS_ERR(kvm->debugfs_dentry)) {
                char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
 
                if (p) {
@@ -5721,8 +5741,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
                goto out_free_5;
 
        kvm_chardev_ops.owner = module;
-       kvm_vm_fops.owner = module;
-       kvm_vcpu_fops.owner = module;
 
        r = misc_register(&kvm_dev);
        if (r) {
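
The kvm_create_vm() hunk takes a module reference via try_module_get() so kvm cannot be unloaded while a VM fd exists, with the matching module_put() in kvm_destroy_vm(); that in turn lets kvm_vm_fops and kvm_vcpu_fops become const, since only kvm_chardev_ops still needs its .owner patched at init time. A userspace model of the refcount pairing (the real primitives are kernel-only, so the counter below is a stand-in):

#include <stdbool.h>
#include <stdio.h>

/* models try_module_get()/module_put() on kvm_chardev_ops.owner */
static int refcount;
static bool going;	/* models MODULE_STATE_GOING */

static bool try_get(void)
{
	if (going)
		return false;	/* refuse new references during unload */
	refcount++;
	return true;
}

static void put(void)
{
	refcount--;
}

int main(void)
{
	if (!try_get())		/* as in kvm_create_vm() */
		return 1;	/* -ENODEV: module is on its way out */
	printf("VM alive, refcount=%d\n", refcount);
	put();			/* as in kvm_destroy_vm() */
	return 0;
}
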
index ce878f4be4daab2dc5e1677b2a3c3edd760959e9..dd84676615f1a62a05e8da01290cf66f3c3f6792 100644 (file)
@@ -27,7 +27,7 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 {
        DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
        struct gfn_to_pfn_cache *gpc;
-       bool wake_vcpus = false;
+       bool evict_vcpus = false;
 
        spin_lock(&kvm->gpc_lock);
        list_for_each_entry(gpc, &kvm->gpc_list, list) {
@@ -40,41 +40,32 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 
                        /*
                         * If a guest vCPU could be using the physical address,
-                        * it needs to be woken.
+                        * it needs to be forced out of guest mode.
                         */
-                       if (gpc->guest_uses_pa) {
-                               if (!wake_vcpus) {
-                                       wake_vcpus = true;
+                       if (gpc->usage & KVM_GUEST_USES_PFN) {
+                               if (!evict_vcpus) {
+                                       evict_vcpus = true;
                                        bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
                                }
                                __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
                        }
-
-                       /*
-                        * We cannot call mark_page_dirty() from here because
-                        * this physical CPU might not have an active vCPU
-                        * with which to do the KVM dirty tracking.
-                        *
-                        * Neither is there any point in telling the kernel MM
-                        * that the underlying page is dirty. A vCPU in guest
-                        * mode might still be writing to it up to the point
-                        * where we wake them a few lines further down anyway.
-                        *
-                        * So all the dirty marking happens on the unmap.
-                        */
                }
                write_unlock_irq(&gpc->lock);
        }
        spin_unlock(&kvm->gpc_lock);
 
-       if (wake_vcpus) {
-               unsigned int req = KVM_REQ_GPC_INVALIDATE;
+       if (evict_vcpus) {
+               /*
+                * KVM needs to ensure the vCPU is fully out of guest context
+                * before allowing the invalidation to continue.
+                */
+               unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
                bool called;
 
                /*
                 * If the OOM reaper is active, then all vCPUs should have
                 * been stopped already, so perform the request without
-                * KVM_REQUEST_WAIT and be sad if any needed to be woken.
+                * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
                 */
                if (!may_block)
                        req &= ~KVM_REQUEST_WAIT;
@@ -104,8 +95,7 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check);
 
-static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva,
-                         gpa_t gpa, bool dirty)
+static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva, gpa_t gpa)
 {
        /* Unmap the old page if it was mapped before, and release it */
        if (!is_error_noslot_pfn(pfn)) {
@@ -118,9 +108,7 @@ static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva,
 #endif
                }
 
-               kvm_release_pfn(pfn, dirty);
-               if (dirty)
-                       mark_page_dirty(kvm, gpa);
+               kvm_release_pfn(pfn, false);
        }
 }
 
@@ -152,7 +140,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, unsigned long uhva)
 }
 
 int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-                                gpa_t gpa, unsigned long len, bool dirty)
+                                gpa_t gpa, unsigned long len)
 {
        struct kvm_memslots *slots = kvm_memslots(kvm);
        unsigned long page_offset = gpa & ~PAGE_MASK;
@@ -160,7 +148,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
        unsigned long old_uhva;
        gpa_t old_gpa;
        void *old_khva;
-       bool old_valid, old_dirty;
+       bool old_valid;
        int ret = 0;
 
        /*
@@ -177,20 +165,19 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
        old_khva = gpc->khva - offset_in_page(gpc->khva);
        old_uhva = gpc->uhva;
        old_valid = gpc->valid;
-       old_dirty = gpc->dirty;
 
        /* If the userspace HVA is invalid, refresh that first */
        if (gpc->gpa != gpa || gpc->generation != slots->generation ||
            kvm_is_error_hva(gpc->uhva)) {
                gfn_t gfn = gpa_to_gfn(gpa);
 
-               gpc->dirty = false;
                gpc->gpa = gpa;
                gpc->generation = slots->generation;
                gpc->memslot = __gfn_to_memslot(slots, gfn);
                gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
 
                if (kvm_is_error_hva(gpc->uhva)) {
+                       gpc->pfn = KVM_PFN_ERR_FAULT;
                        ret = -EFAULT;
                        goto out;
                }
@@ -219,7 +206,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                        goto map_done;
                }
 
-               if (gpc->kernel_map) {
+               if (gpc->usage & KVM_HOST_USES_PFN) {
                        if (new_pfn == old_pfn) {
                                new_khva = old_khva;
                                old_pfn = KVM_PFN_ERR_FAULT;
@@ -255,14 +242,9 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
        }
 
  out:
-       if (ret)
-               gpc->dirty = false;
-       else
-               gpc->dirty = dirty;
-
        write_unlock_irq(&gpc->lock);
 
-       __release_gpc(kvm, old_pfn, old_khva, old_gpa, old_dirty);
+       __release_gpc(kvm, old_pfn, old_khva, old_gpa);
 
        return ret;
 }
@@ -272,7 +254,6 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 {
        void *old_khva;
        kvm_pfn_t old_pfn;
-       bool old_dirty;
        gpa_t old_gpa;
 
        write_lock_irq(&gpc->lock);
@@ -280,7 +261,6 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
        gpc->valid = false;
 
        old_khva = gpc->khva - offset_in_page(gpc->khva);
-       old_dirty = gpc->dirty;
        old_gpa = gpc->gpa;
        old_pfn = gpc->pfn;
 
@@ -293,16 +273,17 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 
        write_unlock_irq(&gpc->lock);
 
-       __release_gpc(kvm, old_pfn, old_khva, old_gpa, old_dirty);
+       __release_gpc(kvm, old_pfn, old_khva, old_gpa);
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);
 
 
 int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-                             struct kvm_vcpu *vcpu, bool guest_uses_pa,
-                             bool kernel_map, gpa_t gpa, unsigned long len,
-                             bool dirty)
+                             struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+                             gpa_t gpa, unsigned long len)
 {
+       WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
+
        if (!gpc->active) {
                rwlock_init(&gpc->lock);
 
@@ -310,8 +291,7 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                gpc->pfn = KVM_PFN_ERR_FAULT;
                gpc->uhva = KVM_HVA_ERR_BAD;
                gpc->vcpu = vcpu;
-               gpc->kernel_map = kernel_map;
-               gpc->guest_uses_pa = guest_uses_pa;
+               gpc->usage = usage;
                gpc->valid = false;
                gpc->active = true;
 
@@ -319,7 +299,7 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                list_add(&gpc->list, &kvm->gpc_list);
                spin_unlock(&kvm->gpc_lock);
        }
-       return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len, dirty);
+       return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_init);
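
The reworked API folds the guest_uses_pa/kernel_map/dirty booleans into one usage flag word that is validated once at init time; flags make call sites self-describing and extensible. A sketch of that validation idiom under assumed flag names:

#include <stdio.h>

enum cache_usage {
	GUEST_USES_PFN	= 1 << 0,
	HOST_USES_PFN	= 1 << 1,
	GUEST_AND_HOST	= GUEST_USES_PFN | HOST_USES_PFN,
};

static int cache_init(enum cache_usage usage)
{
	/* reject zero and any bit outside the known mask, mirroring the
	 * WARN_ON_ONCE() in kvm_gfn_to_pfn_cache_init() */
	if (!usage || (usage & GUEST_AND_HOST) != usage)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       cache_init(HOST_USES_PFN),		/* ok */
	       cache_init(0),				/* rejected */
	       cache_init(GUEST_AND_HOST | (1 << 5)));	/* rejected */
	return 0;
}
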